book3s_hv_rmhandlers.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *       Real Mode handlers that need to be in the linear mapping            *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_handler_trampoline_enter in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))
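
/* VCPU_GPR(n) is the byte offset of guest GPR n within the vcpu struct:
 * the VCPU_GPRS array base plus n * 8 bytes (one unsigned long per GPR). */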

/******************************************************************************
 *                                                                            *
 *                                Entry code                                  *
 *                                                                            *
 *****************************************************************************/

#define XICS_XIRR		4
#define XICS_QIRR		0xc
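/* Byte offsets of the XIRR and QIRR registers within the per-cpu XICS
 * interrupt presentation area whose real address is in HSTATE_XICS_PHYS. */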

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* We got here with an IPI; clear it */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	li	r7, XICS_XIRR
	lwzcix	r8, r5, r7		/* ack the interrupt */
	sync
	stbcix	r0, r5, r6		/* clear it */
	stwcix	r8, r5, r7		/* EOI it */
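
	/* fall through to kvmppc_hv_entry with r4 = vcpu, r13 = PACA */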

	.global	kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR.  Set the MER bit if there is a pending external irq. */
10:	ld	r8,KVM_LPCR(r9)
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and.	r0,r0,r7
	beq	11f
	ori	r8,r8,LPCR_MER
11:	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 * XXX maybe only need this on primary thread?
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	rldimi	r6,r5,0,62		/* XXX map as if threads 1:1 p:v */
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */
	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
hcall_real_cont:

	/* Check for mediated interrupts (could be done earlier really ...) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_KVM(r9)
	ld	r5,KVM_LPCR(r5)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	40f
	cmpwi	r3,1
	ble	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/* Secondary threads wait for primary to do partition switch */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR; r9 = vcpu pointer */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r3)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
12:	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:
BEGIN_FTR_SECTION
	b	12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)

	b	hcall_real_cont
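
/*
 * hcall_real_table: one 32-bit word per hcall number (hcall numbers are
 * multiples of 4, so the dispatch above uses the hcall number itself as
 * the byte index into the table).  A non-zero entry is the offset of the
 * real-mode handler from the start of the table; zero means the hcall is
 * not handled in real mode and gets passed up to the kernel/qemu.
 */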
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	0		/* 0xe0 */
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
	b	fast_guest_return
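
/*
 * H_SET_DABR real-mode handler: remember the requested DABR value in the
 * vcpu (so it is re-loaded on the next guest entry) and set the SPR now.
 * Returns H_SUCCESS (0) in r3.
 */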
_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr
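
/*
 * A secondary thread that was too late to enter the guest (the vcore was
 * already starting to exit): wait until the primary has switched back to
 * the host partition, reload the bolted host SLB entries, then join the
 * other secondaries in secondary_nap below.
 */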
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	50f

secondary_nap:
	/* Clear any pending IPI */
50:	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b
	isync

	mfspr	r4, SPRN_LPCR
	li	r0, LPCR_PECE
	andc	r4, r4, r0
	ori	r4, r4, LPCR_PECE0	/* exit nap on interrupt */
	mtspr	SPRN_LPCR, r4
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,r6,r3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,r7,r4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr