book3s_hv_rmhandlers.S

  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  12. *
  13. * Derived from book3s_rmhandlers.S and other files, which are:
  14. *
  15. * Copyright SUSE Linux Products GmbH 2009
  16. *
  17. * Authors: Alexander Graf <agraf@suse.de>
  18. */
  19. #include <asm/ppc_asm.h>
  20. #include <asm/kvm_asm.h>
  21. #include <asm/reg.h>
  22. #include <asm/page.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/exception-64s.h>
  25. /*****************************************************************************
  26.  *                                                                           *
  27.  *        Real Mode handlers that need to be in the linear mapping           *
  28.  *                                                                           *
  29.  ****************************************************************************/
  30. .globl kvmppc_skip_interrupt
  31. kvmppc_skip_interrupt:
  32. mfspr r13,SPRN_SRR0
  33. addi r13,r13,4
  34. mtspr SPRN_SRR0,r13
  35. GET_SCRATCH0(r13)
  36. rfid
  37. b .
  38. .globl kvmppc_skip_Hinterrupt
  39. kvmppc_skip_Hinterrupt:
  40. mfspr r13,SPRN_HSRR0
  41. addi r13,r13,4
  42. mtspr SPRN_HSRR0,r13
  43. GET_SCRATCH0(r13)
  44. hrfid
  45. b .
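/*
 * The two "skip" stubs above simply advance SRR0 (or HSRR0) past the
 * instruction at which the interrupt was taken, restore r13 from the
 * scratch SPR, and return with rfid/hrfid, so execution resumes one
 * instruction beyond the trapping point instead of re-executing it.
 */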
  46. /*
  47. * Call kvmppc_handler_trampoline_enter in real mode.
  48. * Must be called with interrupts hard-disabled.
  49. *
  50. * Input Registers:
  51. *
  52. * LR = return address to continue at after eventually re-enabling MMU
  53. */
  54. _GLOBAL(kvmppc_hv_entry_trampoline)
  55. mfmsr r10
  56. LOAD_REG_ADDR(r5, kvmppc_hv_entry)
  57. li r0,MSR_RI
  58. andc r0,r10,r0
  59. li r6,MSR_IR | MSR_DR
  60. andc r6,r10,r6
  61. mtmsrd r0,1 /* clear RI in MSR */
  62. mtsrr0 r5
  63. mtsrr1 r6
  64. RFI
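/*
 * The trampoline above drops to real mode: it computes an MSR value with
 * IR and DR cleared (MMU off), first clears RI so the SRR0/SRR1 setup is
 * treated as unrecoverable if an interrupt hits, and then RFIs to
 * kvmppc_hv_entry.  Per the comment above, LR holds the address to
 * continue at once the MMU is re-enabled; kvmppc_hv_entry stashes it in
 * HSTATE_VMHANDLER for the exit path.
 */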
  65. #define ULONG_SIZE 8
  66. #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
  67. /******************************************************************************
  68.  *                                                                            *
  69.  *                               Entry code                                   *
  70.  *                                                                            *
  71.  *****************************************************************************/
  72. #define XICS_XIRR 4
  73. #define XICS_QIRR 0xc
  74. /*
  75. * We come in here when wakened from nap mode on a secondary hw thread.
  76. * Relocation is off and most register values are lost.
  77. * r13 points to the PACA.
  78. */
  79. .globl kvm_start_guest
  80. kvm_start_guest:
  81. ld r1,PACAEMERGSP(r13)
  82. subi r1,r1,STACK_FRAME_OVERHEAD
  83. /* get vcpu pointer */
  84. ld r4, HSTATE_KVM_VCPU(r13)
  85. /* We got here with an IPI; clear it */
  86. ld r5, HSTATE_XICS_PHYS(r13)
  87. li r0, 0xff
  88. li r6, XICS_QIRR
  89. li r7, XICS_XIRR
  90. lwzcix r8, r5, r7 /* ack the interrupt */
  91. sync
  92. stbcix r0, r5, r6 /* clear it */
  93. stwcix r8, r5, r7 /* EOI it */
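/*
 * The three accesses above talk to this thread's XICS presentation
 * controller through cache-inhibited real-mode loads and stores at the
 * physical address kept in HSTATE_XICS_PHYS.  Conceptually (an
 * illustrative C-like sketch, not the kernel's accessor API):
 *
 *	xirr = load32(xics_phys + XICS_XIRR);	// accept/ack the IPI
 *	store8(xics_phys + XICS_QIRR, 0xff);	// reset MFRR, clearing the IPI
 *	store32(xics_phys + XICS_XIRR, xirr);	// EOI what we just accepted
 *
 * load32/store8/store32 are informal names for the lwzcix/stbcix/stwcix
 * forms used above.
 */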
  94. .global kvmppc_hv_entry
  95. kvmppc_hv_entry:
  96. /* Required state:
  97. *
  98. * R4 = vcpu pointer
  99. * MSR = ~IR|DR
  100. * R13 = PACA
  101. * R1 = host R1
  102. * all other volatile GPRS = free
  103. */
  104. mflr r0
  105. std r0, HSTATE_VMHANDLER(r13)
  106. ld r14, VCPU_GPR(r14)(r4)
  107. ld r15, VCPU_GPR(r15)(r4)
  108. ld r16, VCPU_GPR(r16)(r4)
  109. ld r17, VCPU_GPR(r17)(r4)
  110. ld r18, VCPU_GPR(r18)(r4)
  111. ld r19, VCPU_GPR(r19)(r4)
  112. ld r20, VCPU_GPR(r20)(r4)
  113. ld r21, VCPU_GPR(r21)(r4)
  114. ld r22, VCPU_GPR(r22)(r4)
  115. ld r23, VCPU_GPR(r23)(r4)
  116. ld r24, VCPU_GPR(r24)(r4)
  117. ld r25, VCPU_GPR(r25)(r4)
  118. ld r26, VCPU_GPR(r26)(r4)
  119. ld r27, VCPU_GPR(r27)(r4)
  120. ld r28, VCPU_GPR(r28)(r4)
  121. ld r29, VCPU_GPR(r29)(r4)
  122. ld r30, VCPU_GPR(r30)(r4)
  123. ld r31, VCPU_GPR(r31)(r4)
  124. /* Load guest PMU registers */
  125. /* R4 is live here (vcpu pointer) */
  126. li r3, 1
  127. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  128. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  129. isync
  130. lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
  131. lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
  132. lwz r6, VCPU_PMC + 8(r4)
  133. lwz r7, VCPU_PMC + 12(r4)
  134. lwz r8, VCPU_PMC + 16(r4)
  135. lwz r9, VCPU_PMC + 20(r4)
  136. BEGIN_FTR_SECTION
  137. lwz r10, VCPU_PMC + 24(r4)
  138. lwz r11, VCPU_PMC + 28(r4)
  139. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  140. mtspr SPRN_PMC1, r3
  141. mtspr SPRN_PMC2, r5
  142. mtspr SPRN_PMC3, r6
  143. mtspr SPRN_PMC4, r7
  144. mtspr SPRN_PMC5, r8
  145. mtspr SPRN_PMC6, r9
  146. BEGIN_FTR_SECTION
  147. mtspr SPRN_PMC7, r10
  148. mtspr SPRN_PMC8, r11
  149. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  150. ld r3, VCPU_MMCR(r4)
  151. ld r5, VCPU_MMCR + 8(r4)
  152. ld r6, VCPU_MMCR + 16(r4)
  153. mtspr SPRN_MMCR1, r5
  154. mtspr SPRN_MMCRA, r6
  155. mtspr SPRN_MMCR0, r3
  156. isync
  157. /* Load up FP, VMX and VSX registers */
  158. bl kvmppc_load_fp
  159. BEGIN_FTR_SECTION
  160. /* Switch DSCR to guest value */
  161. ld r5, VCPU_DSCR(r4)
  162. mtspr SPRN_DSCR, r5
  163. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  164. /*
  165. * Set the decrementer to the guest decrementer.
  166. */
  167. ld r8,VCPU_DEC_EXPIRES(r4)
  168. mftb r7
  169. subf r3,r7,r8
  170. mtspr SPRN_DEC,r3
  171. stw r3,VCPU_DEC(r4)
  172. ld r5, VCPU_SPRG0(r4)
  173. ld r6, VCPU_SPRG1(r4)
  174. ld r7, VCPU_SPRG2(r4)
  175. ld r8, VCPU_SPRG3(r4)
  176. mtspr SPRN_SPRG0, r5
  177. mtspr SPRN_SPRG1, r6
  178. mtspr SPRN_SPRG2, r7
  179. mtspr SPRN_SPRG3, r8
  180. /* Save R1 in the PACA */
  181. std r1, HSTATE_HOST_R1(r13)
  182. /* Increment yield count if they have a VPA */
  183. ld r3, VCPU_VPA(r4)
  184. cmpdi r3, 0
  185. beq 25f
  186. lwz r5, LPPACA_YIELDCOUNT(r3)
  187. addi r5, r5, 1
  188. stw r5, LPPACA_YIELDCOUNT(r3)
  189. 25:
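/*
 * The vcpu's VPA (lppaca) yield count is bumped here on the way into the
 * guest and again on the way out (see the matching increment in the exit
 * path below), so the guest can tell from this counter whether the
 * virtual CPU has been preempted since it last looked at it.
 */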
  190. /* Load up DAR and DSISR */
  191. ld r5, VCPU_DAR(r4)
  192. lwz r6, VCPU_DSISR(r4)
  193. mtspr SPRN_DAR, r5
  194. mtspr SPRN_DSISR, r6
  195. /* Set partition DABR */
  196. li r5,3
  197. ld r6,VCPU_DABR(r4)
  198. mtspr SPRN_DABRX,r5
  199. mtspr SPRN_DABR,r6
  200. BEGIN_FTR_SECTION
  201. /* Restore AMR and UAMOR, set AMOR to all 1s */
  202. ld r5,VCPU_AMR(r4)
  203. ld r6,VCPU_UAMOR(r4)
  204. li r7,-1
  205. mtspr SPRN_AMR,r5
  206. mtspr SPRN_UAMOR,r6
  207. mtspr SPRN_AMOR,r7
  208. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  209. /* Clear out SLB */
  210. li r6,0
  211. slbmte r6,r6
  212. slbia
  213. ptesync
  214. BEGIN_FTR_SECTION
  215. b 30f
  216. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  217. /*
  218. * POWER7 host -> guest partition switch code.
  219. * We don't have to lock against concurrent tlbies,
  220. * but we do have to coordinate across hardware threads.
  221. */
  222. /* Increment entry count iff exit count is zero. */
  223. ld r5,HSTATE_KVM_VCORE(r13)
  224. addi r9,r5,VCORE_ENTRY_EXIT
  225. 21: lwarx r3,0,r9
  226. cmpwi r3,0x100 /* any threads starting to exit? */
  227. bge secondary_too_late /* if so we're too late to the party */
  228. addi r3,r3,1
  229. stwcx. r3,0,r9
  230. bne 21b
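/*
 * vcore->entry_exit_count packs two counters: the low bits count threads
 * that have entered the guest, and the 0xff00 byte counts threads that
 * have started to exit (see the exit path below).  The lwarx/stwcx. loop
 * above atomically increments the entry count only if no thread has
 * begun exiting yet; as an illustrative C-like sketch (informal helper
 * names, not real kernel primitives):
 *
 *	do {
 *		old = load_and_reserve(&vc->entry_exit_count);
 *		if (old >= 0x100)	// someone is already exiting
 *			goto secondary_too_late;
 *	} while (!store_conditional(&vc->entry_exit_count, old + 1));
 */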
  231. /* Primary thread switches to guest partition. */
  232. ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
  233. lwz r6,VCPU_PTID(r4)
  234. cmpwi r6,0
  235. bne 20f
  236. ld r6,KVM_SDR1(r9)
  237. lwz r7,KVM_LPID(r9)
  238. li r0,LPID_RSVD /* switch to reserved LPID */
  239. mtspr SPRN_LPID,r0
  240. ptesync
  241. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  242. mtspr SPRN_LPID,r7
  243. isync
  244. li r0,1
  245. stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
  246. b 10f
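/*
 * Only the thread with ptid 0 performs the actual partition switch:
 * LPID is first set to the reserved value, then SDR1 and the real LPID
 * are installed with the required ptesync/isync ordering, and finally
 * vcore->in_guest is set so the secondary threads spinning at label 20
 * just below know they may proceed into the guest.
 */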
  247. /* Secondary threads wait for primary to have done partition switch */
  248. 20: lbz r0,VCORE_IN_GUEST(r5)
  249. cmpwi r0,0
  250. beq 20b
  251. /* Set LPCR. Set the MER bit if there is a pending external irq. */
  252. 10: ld r8,KVM_LPCR(r9)
  253. ld r0,VCPU_PENDING_EXC(r4)
  254. li r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
  255. oris r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
  256. and. r0,r0,r7
  257. beq 11f
  258. ori r8,r8,LPCR_MER
  259. 11: mtspr SPRN_LPCR,r8
  260. ld r8,KVM_RMOR(r9)
  261. mtspr SPRN_RMOR,r8
  262. isync
  263. /* Check if HDEC expires soon */
  264. mfspr r3,SPRN_HDEC
  265. cmpwi r3,10
  266. li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  267. mr r9,r4
  268. blt hdec_soon
  269. /*
  270. * Invalidate the TLB if we could possibly have stale TLB
  271. * entries for this partition on this core due to the use
  272. * of tlbiel.
  273. * XXX maybe only need this on primary thread?
  274. */
  275. ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
  276. lwz r5,VCPU_VCPUID(r4)
  277. lhz r6,PACAPACAINDEX(r13)
  278. rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
  279. lhz r8,VCPU_LAST_CPU(r4)
  280. sldi r7,r6,1 /* see if this is the same vcpu */
  281. add r7,r7,r9 /* as last ran on this pcpu */
  282. lhz r0,KVM_LAST_VCPU(r7)
  283. cmpw r6,r8 /* on the same cpu core as last time? */
  284. bne 3f
  285. cmpw r0,r5 /* same vcpu as this core last ran? */
  286. beq 1f
  287. 3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
  288. sth r5,KVM_LAST_VCPU(r7)
  289. li r6,128
  290. mtctr r6
  291. li r7,0x800 /* IS field = 0b10 */
  292. ptesync
  293. 2: tlbiel r7
  294. addi r7,r7,0x1000
  295. bdnz 2b
  296. ptesync
  297. 1:
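/*
 * tlbiel only affects the TLB of the core it runs on, so translations
 * for this guest could be stale if this vcpu last ran on a different
 * core, or if a different vcpu of this guest was the last to run here.
 * The code above keeps the last (vcpu, core) pairing in last_cpu /
 * last_vcpu and, whenever it changes, flushes all 128 congruence
 * classes with tlbiel (IS field = 0b10) before entering the guest.
 */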
  298. /* Save purr/spurr */
  299. mfspr r5,SPRN_PURR
  300. mfspr r6,SPRN_SPURR
  301. std r5,HSTATE_PURR(r13)
  302. std r6,HSTATE_SPURR(r13)
  303. ld r7,VCPU_PURR(r4)
  304. ld r8,VCPU_SPURR(r4)
  305. mtspr SPRN_PURR,r7
  306. mtspr SPRN_SPURR,r8
  307. b 31f
  308. /*
  309. * PPC970 host -> guest partition switch code.
  310. * We have to lock against concurrent tlbies,
  311. * using native_tlbie_lock to lock against host tlbies
  312. * and kvm->arch.tlbie_lock to lock against guest tlbies.
  313. * We also have to invalidate the TLB since its
  314. * entries aren't tagged with the LPID.
  315. */
  316. 30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
  317. /* first take native_tlbie_lock */
  318. .section ".toc","aw"
  319. toc_tlbie_lock:
  320. .tc native_tlbie_lock[TC],native_tlbie_lock
  321. .previous
  322. ld r3,toc_tlbie_lock@toc(2)
  323. lwz r8,PACA_LOCK_TOKEN(r13)
  324. 24: lwarx r0,0,r3
  325. cmpwi r0,0
  326. bne 24b
  327. stwcx. r8,0,r3
  328. bne 24b
  329. isync
  330. ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
  331. li r0,0x18f
  332. rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
  333. or r0,r7,r0
  334. ptesync
  335. sync
  336. mtspr SPRN_HID4,r0 /* switch to reserved LPID */
  337. isync
  338. li r0,0
  339. stw r0,0(r3) /* drop native_tlbie_lock */
  340. /* invalidate the whole TLB */
  341. li r0,256
  342. mtctr r0
  343. li r6,0
  344. 25: tlbiel r6
  345. addi r6,r6,0x1000
  346. bdnz 25b
  347. ptesync
  348. /* Take the guest's tlbie_lock */
  349. addi r3,r9,KVM_TLBIE_LOCK
  350. 24: lwarx r0,0,r3
  351. cmpwi r0,0
  352. bne 24b
  353. stwcx. r8,0,r3
  354. bne 24b
  355. isync
  356. ld r6,KVM_SDR1(r9)
  357. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  358. /* Set up HID4 with the guest's LPID etc. */
  359. sync
  360. mtspr SPRN_HID4,r7
  361. isync
  362. /* drop the guest's tlbie_lock */
  363. li r0,0
  364. stw r0,0(r3)
  365. /* Check if HDEC expires soon */
  366. mfspr r3,SPRN_HDEC
  367. cmpwi r3,10
  368. li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  369. mr r9,r4
  370. blt hdec_soon
  371. /* Enable HDEC interrupts */
  372. mfspr r0,SPRN_HID0
  373. li r3,1
  374. rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
  375. sync
  376. mtspr SPRN_HID0,r0
  377. mfspr r0,SPRN_HID0
  378. mfspr r0,SPRN_HID0
  379. mfspr r0,SPRN_HID0
  380. mfspr r0,SPRN_HID0
  381. mfspr r0,SPRN_HID0
  382. mfspr r0,SPRN_HID0
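/*
 * The sync before the mtspr and the string of mfspr reads afterwards
 * follow the PPC970 HID0 update sequence, making sure the new value has
 * taken effect; here the update turns on HID0[HDICE] so that hypervisor
 * decrementer interrupts are delivered while the guest runs.
 */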
  383. /* Load up guest SLB entries */
  384. 31: lwz r5,VCPU_SLB_MAX(r4)
  385. cmpwi r5,0
  386. beq 9f
  387. mtctr r5
  388. addi r6,r4,VCPU_SLB
  389. 1: ld r8,VCPU_SLB_E(r6)
  390. ld r9,VCPU_SLB_V(r6)
  391. slbmte r9,r8
  392. addi r6,r6,VCPU_SLB_SIZE
  393. bdnz 1b
  394. 9:
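/*
 * The loop at 1: above reloads the guest SLB: each vcpu SLB entry holds
 * the ESID and VSID doublewords that slbmte expects, and only slb_max
 * entries are loaded.  The SLB was cleared (slbia) before the partition
 * switch, so all remaining slots stay invalid.
 */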
  395. /* Restore state of CTRL run bit; assume 1 on entry */
  396. lwz r5,VCPU_CTRL(r4)
  397. andi. r5,r5,1
  398. bne 4f
  399. mfspr r6,SPRN_CTRLF
  400. clrrdi r6,r6,1
  401. mtspr SPRN_CTRLT,r6
  402. 4:
  403. ld r6, VCPU_CTR(r4)
  404. lwz r7, VCPU_XER(r4)
  405. mtctr r6
  406. mtxer r7
  407. /* Move SRR0 and SRR1 into the respective regs */
  408. ld r6, VCPU_SRR0(r4)
  409. ld r7, VCPU_SRR1(r4)
  410. mtspr SPRN_SRR0, r6
  411. mtspr SPRN_SRR1, r7
  412. ld r10, VCPU_PC(r4)
  413. ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
  414. rldicl r11, r11, 63 - MSR_HV_LG, 1
  415. rotldi r11, r11, 1 + MSR_HV_LG
  416. ori r11, r11, MSR_ME
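/*
 * The rldicl/rotldi pair rotates the MSR so that MSR_HV sits in the top
 * bit, clears it via the rotate-and-mask, rotates it back, and then ME
 * is forced on.  Net effect, in C terms:
 *
 *	guest_msr = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
 */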
  417. fast_guest_return:
  418. mtspr SPRN_HSRR0,r10
  419. mtspr SPRN_HSRR1,r11
  420. /* Activate guest mode, so faults get handled by KVM */
  421. li r9, KVM_GUEST_MODE_GUEST
  422. stb r9, HSTATE_IN_GUEST(r13)
  423. /* Enter guest */
  424. ld r5, VCPU_LR(r4)
  425. lwz r6, VCPU_CR(r4)
  426. mtlr r5
  427. mtcr r6
  428. ld r0, VCPU_GPR(r0)(r4)
  429. ld r1, VCPU_GPR(r1)(r4)
  430. ld r2, VCPU_GPR(r2)(r4)
  431. ld r3, VCPU_GPR(r3)(r4)
  432. ld r5, VCPU_GPR(r5)(r4)
  433. ld r6, VCPU_GPR(r6)(r4)
  434. ld r7, VCPU_GPR(r7)(r4)
  435. ld r8, VCPU_GPR(r8)(r4)
  436. ld r9, VCPU_GPR(r9)(r4)
  437. ld r10, VCPU_GPR(r10)(r4)
  438. ld r11, VCPU_GPR(r11)(r4)
  439. ld r12, VCPU_GPR(r12)(r4)
  440. ld r13, VCPU_GPR(r13)(r4)
  441. ld r4, VCPU_GPR(r4)(r4)
  442. hrfid
  443. b .
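/*
 * hrfid loads the guest's PC and MSR from HSRR0/HSRR1 (set up at
 * fast_guest_return above) and enters the guest; the "b ." that follows
 * is never expected to execute.
 */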
  444. /******************************************************************************
  445.  *                                                                            *
  446.  *                               Exit code                                    *
  447.  *                                                                            *
  448.  *****************************************************************************/
  449. /*
  450. * We come here from the first-level interrupt handlers.
  451. */
  452. .globl kvmppc_interrupt
  453. kvmppc_interrupt:
  454. /*
  455. * Register contents:
  456. * R12 = interrupt vector
  457. * R13 = PACA
  458. * guest CR, R12 saved in shadow VCPU SCRATCH1/0
  459. * guest R13 saved in SPRN_SCRATCH0
  460. */
  461. /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
  462. std r9, HSTATE_HOST_R2(r13)
  463. ld r9, HSTATE_KVM_VCPU(r13)
  464. /* Save registers */
  465. std r0, VCPU_GPR(r0)(r9)
  466. std r1, VCPU_GPR(r1)(r9)
  467. std r2, VCPU_GPR(r2)(r9)
  468. std r3, VCPU_GPR(r3)(r9)
  469. std r4, VCPU_GPR(r4)(r9)
  470. std r5, VCPU_GPR(r5)(r9)
  471. std r6, VCPU_GPR(r6)(r9)
  472. std r7, VCPU_GPR(r7)(r9)
  473. std r8, VCPU_GPR(r8)(r9)
  474. ld r0, HSTATE_HOST_R2(r13)
  475. std r0, VCPU_GPR(r9)(r9)
  476. std r10, VCPU_GPR(r10)(r9)
  477. std r11, VCPU_GPR(r11)(r9)
  478. ld r3, HSTATE_SCRATCH0(r13)
  479. lwz r4, HSTATE_SCRATCH1(r13)
  480. std r3, VCPU_GPR(r12)(r9)
  481. stw r4, VCPU_CR(r9)
  482. /* Restore R1/R2 so we can handle faults */
  483. ld r1, HSTATE_HOST_R1(r13)
  484. ld r2, PACATOC(r13)
  485. mfspr r10, SPRN_SRR0
  486. mfspr r11, SPRN_SRR1
  487. std r10, VCPU_SRR0(r9)
  488. std r11, VCPU_SRR1(r9)
  489. andi. r0, r12, 2 /* need to read HSRR0/1? */
  490. beq 1f
  491. mfspr r10, SPRN_HSRR0
  492. mfspr r11, SPRN_HSRR1
  493. clrrdi r12, r12, 2
  494. 1: std r10, VCPU_PC(r9)
  495. std r11, VCPU_MSR(r9)
  496. GET_SCRATCH0(r3)
  497. mflr r4
  498. std r3, VCPU_GPR(r13)(r9)
  499. std r4, VCPU_LR(r9)
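/*
 * At this point the guest's volatile state is fully captured: GPRs, CR
 * and r12 recovered from the scratch save areas, PC and MSR taken from
 * SRR0/1 (or HSRR0/1 for hypervisor-class vectors, with the low bits of
 * the vector number cleared in r12), plus LR and the original guest r13.
 */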
  500. /* Unset guest mode */
  501. li r0, KVM_GUEST_MODE_NONE
  502. stb r0, HSTATE_IN_GUEST(r13)
  503. stw r12,VCPU_TRAP(r9)
  504. /* See if this is a leftover HDEC interrupt */
  505. cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  506. bne 2f
  507. mfspr r3,SPRN_HDEC
  508. cmpwi r3,0
  509. bge ignore_hdec
  510. 2:
  511. /* See if this is something we can handle in real mode */
  512. cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
  513. beq hcall_try_real_mode
  514. hcall_real_cont:
  515. /* Check for mediated interrupts (could be done earlier really ...) */
  516. BEGIN_FTR_SECTION
  517. cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
  518. bne+ 1f
  519. ld r5,VCPU_KVM(r9)
  520. ld r5,KVM_LPCR(r5)
  521. andi. r0,r11,MSR_EE
  522. beq 1f
  523. andi. r0,r5,LPCR_MER
  524. bne bounce_ext_interrupt
  525. 1:
  526. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  527. /* Save DEC */
  528. mfspr r5,SPRN_DEC
  529. mftb r6
  530. extsw r5,r5
  531. add r5,r5,r6
  532. std r5,VCPU_DEC_EXPIRES(r9)
  533. /* Save HEIR (HV emulation assist reg) in last_inst
  534. if this is an HEI (HV emulation interrupt, e40) */
  535. li r3,-1
  536. BEGIN_FTR_SECTION
  537. cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
  538. bne 11f
  539. mfspr r3,SPRN_HEIR
  540. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  541. 11: stw r3,VCPU_LAST_INST(r9)
  542. /* Save more register state */
  543. mfxer r5
  544. mfdar r6
  545. mfdsisr r7
  546. mfctr r8
  547. stw r5, VCPU_XER(r9)
  548. std r6, VCPU_DAR(r9)
  549. stw r7, VCPU_DSISR(r9)
  550. std r8, VCPU_CTR(r9)
  551. /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
  552. BEGIN_FTR_SECTION
  553. cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
  554. beq 6f
  555. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  556. 7: std r6, VCPU_FAULT_DAR(r9)
  557. stw r7, VCPU_FAULT_DSISR(r9)
  558. /* Save guest CTRL register, set runlatch to 1 */
  559. mfspr r6,SPRN_CTRLF
  560. stw r6,VCPU_CTRL(r9)
  561. andi. r0,r6,1
  562. bne 4f
  563. ori r6,r6,1
  564. mtspr SPRN_CTRLT,r6
  565. 4:
  566. /* Read the guest SLB and save it away */
  567. lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
  568. mtctr r0
  569. li r6,0
  570. addi r7,r9,VCPU_SLB
  571. li r5,0
  572. 1: slbmfee r8,r6
  573. andis. r0,r8,SLB_ESID_V@h
  574. beq 2f
  575. add r8,r8,r6 /* put index in */
  576. slbmfev r3,r6
  577. std r8,VCPU_SLB_E(r7)
  578. std r3,VCPU_SLB_V(r7)
  579. addi r7,r7,VCPU_SLB_SIZE
  580. addi r5,r5,1
  581. 2: addi r6,r6,1
  582. bdnz 1b
  583. stw r5,VCPU_SLB_MAX(r9)
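/*
 * The loop above reads every SLB slot back with slbmfee/slbmfev and
 * keeps only the valid ones, compacting them into the vcpu's SLB array
 * and recording the count in slb_max.  Illustrative C-like sketch
 * (informal names):
 *
 *	for (i = n = 0; i < slb_nr; i++) {
 *		e = slbmfee(i);
 *		if (!(e & SLB_ESID_V))
 *			continue;
 *		slb[n].e = e | i;	// put the index back in
 *		slb[n].v = slbmfev(i);
 *		n++;
 *	}
 *	slb_max = n;
 */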
  584. /*
  585. * Save the guest PURR/SPURR
  586. */
  587. BEGIN_FTR_SECTION
  588. mfspr r5,SPRN_PURR
  589. mfspr r6,SPRN_SPURR
  590. ld r7,VCPU_PURR(r9)
  591. ld r8,VCPU_SPURR(r9)
  592. std r5,VCPU_PURR(r9)
  593. std r6,VCPU_SPURR(r9)
  594. subf r5,r7,r5
  595. subf r6,r8,r6
  596. /*
  597. * Restore host PURR/SPURR and add guest times
  598. * so that the time in the guest gets accounted.
  599. */
  600. ld r3,HSTATE_PURR(r13)
  601. ld r4,HSTATE_SPURR(r13)
  602. add r3,r3,r5
  603. add r4,r4,r6
  604. mtspr SPRN_PURR,r3
  605. mtspr SPRN_SPURR,r4
  606. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
  607. /* Clear out SLB */
  608. li r5,0
  609. slbmte r5,r5
  610. slbia
  611. ptesync
  612. hdec_soon:
  613. BEGIN_FTR_SECTION
  614. b 32f
  615. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  616. /*
  617. * POWER7 guest -> host partition switch code.
  618. * We don't have to lock against tlbies but we do
  619. * have to coordinate the hardware threads.
  620. */
  621. /* Increment the threads-exiting-guest count in the 0xff00
  622. bits of vcore->entry_exit_count */
  623. lwsync
  624. ld r5,HSTATE_KVM_VCORE(r13)
  625. addi r6,r5,VCORE_ENTRY_EXIT
  626. 41: lwarx r3,0,r6
  627. addi r0,r3,0x100
  628. stwcx. r0,0,r6
  629. bne 41b
  630. /*
  631. * At this point we have an interrupt that we have to pass
  632. * up to the kernel or qemu; we can't handle it in real mode.
  633. * Thus we have to do a partition switch, so we have to
  634. * collect the other threads, if we are the first thread
  635. * to take an interrupt. To do this, we set the HDEC to 0,
  636. * which causes an HDEC interrupt in all threads within 2ns
  637. * because the HDEC register is shared between all 4 threads.
  638. * However, we don't need to bother if this is an HDEC
  639. * interrupt, since the other threads will already be on their
  640. * way here in that case.
  641. */
  642. cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  643. beq 40f
  644. cmpwi r3,0x100 /* Are we the first here? */
  645. bge 40f
  646. cmpwi r3,1
  647. ble 40f
  648. li r0,0
  649. mtspr SPRN_HDEC,r0
  650. 40:
  651. /* Secondary threads wait for primary to do partition switch */
  652. ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
  653. ld r5,HSTATE_KVM_VCORE(r13)
  654. lwz r3,VCPU_PTID(r9)
  655. cmpwi r3,0
  656. beq 15f
  657. HMT_LOW
  658. 13: lbz r3,VCORE_IN_GUEST(r5)
  659. cmpwi r3,0
  660. bne 13b
  661. HMT_MEDIUM
  662. b 16f
  663. /* Primary thread waits for all the secondaries to exit guest */
  664. 15: lwz r3,VCORE_ENTRY_EXIT(r5)
  665. srwi r0,r3,8
  666. clrldi r3,r3,56
  667. cmpw r3,r0
  668. bne 15b
  669. isync
  670. /* Primary thread switches back to host partition */
  671. ld r6,KVM_HOST_SDR1(r4)
  672. lwz r7,KVM_HOST_LPID(r4)
  673. li r8,LPID_RSVD /* switch to reserved LPID */
  674. mtspr SPRN_LPID,r8
  675. ptesync
  676. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  677. mtspr SPRN_LPID,r7
  678. isync
  679. li r0,0
  680. stb r0,VCORE_IN_GUEST(r5)
  681. lis r8,0x7fff /* MAX_INT@h */
  682. mtspr SPRN_HDEC,r8
  683. 16: ld r8,KVM_HOST_LPCR(r4)
  684. mtspr SPRN_LPCR,r8
  685. isync
  686. b 33f
  687. /*
  688. * PPC970 guest -> host partition switch code.
  689. * We have to lock against concurrent tlbies, and
  690. * we have to flush the whole TLB.
  691. */
  692. 32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
  693. /* Take the guest's tlbie_lock */
  694. lwz r8,PACA_LOCK_TOKEN(r13)
  695. addi r3,r4,KVM_TLBIE_LOCK
  696. 24: lwarx r0,0,r3
  697. cmpwi r0,0
  698. bne 24b
  699. stwcx. r8,0,r3
  700. bne 24b
  701. isync
  702. ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
  703. li r0,0x18f
  704. rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
  705. or r0,r7,r0
  706. ptesync
  707. sync
  708. mtspr SPRN_HID4,r0 /* switch to reserved LPID */
  709. isync
  710. li r0,0
  711. stw r0,0(r3) /* drop guest tlbie_lock */
  712. /* invalidate the whole TLB */
  713. li r0,256
  714. mtctr r0
  715. li r6,0
  716. 25: tlbiel r6
  717. addi r6,r6,0x1000
  718. bdnz 25b
  719. ptesync
  720. /* take native_tlbie_lock */
  721. ld r3,toc_tlbie_lock@toc(2)
  722. 24: lwarx r0,0,r3
  723. cmpwi r0,0
  724. bne 24b
  725. stwcx. r8,0,r3
  726. bne 24b
  727. isync
  728. ld r6,KVM_HOST_SDR1(r4)
  729. mtspr SPRN_SDR1,r6 /* switch to host page table */
  730. /* Set up host HID4 value */
  731. sync
  732. mtspr SPRN_HID4,r7
  733. isync
  734. li r0,0
  735. stw r0,0(r3) /* drop native_tlbie_lock */
  736. lis r8,0x7fff /* MAX_INT@h */
  737. mtspr SPRN_HDEC,r8
  738. /* Disable HDEC interrupts */
  739. mfspr r0,SPRN_HID0
  740. li r3,0
  741. rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
  742. sync
  743. mtspr SPRN_HID0,r0
  744. mfspr r0,SPRN_HID0
  745. mfspr r0,SPRN_HID0
  746. mfspr r0,SPRN_HID0
  747. mfspr r0,SPRN_HID0
  748. mfspr r0,SPRN_HID0
  749. mfspr r0,SPRN_HID0
  750. /* load host SLB entries */
  751. 33: ld r8,PACA_SLBSHADOWPTR(r13)
  752. .rept SLB_NUM_BOLTED
  753. ld r5,SLBSHADOW_SAVEAREA(r8)
  754. ld r6,SLBSHADOW_SAVEAREA+8(r8)
  755. andis. r7,r5,SLB_ESID_V@h
  756. beq 1f
  757. slbmte r6,r5
  758. 1: addi r8,r8,16
  759. .endr
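/*
 * The .rept block above reinstalls the host's bolted SLB entries from
 * the SLB shadow area pointed to by the PACA, skipping any slot whose
 * saved ESID is not valid.  The guest SLB was cleared with slbia before
 * hdec_soon, so only these bolted translations remain afterwards.
 */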
  760. /* Save and reset AMR and UAMOR before turning on the MMU */
  761. BEGIN_FTR_SECTION
  762. mfspr r5,SPRN_AMR
  763. mfspr r6,SPRN_UAMOR
  764. std r5,VCPU_AMR(r9)
  765. std r6,VCPU_UAMOR(r9)
  766. li r6,0
  767. mtspr SPRN_AMR,r6
  768. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  769. /* Restore host DABR and DABRX */
  770. ld r5,HSTATE_DABR(r13)
  771. li r6,7
  772. mtspr SPRN_DABR,r5
  773. mtspr SPRN_DABRX,r6
  774. /* Switch DSCR back to host value */
  775. BEGIN_FTR_SECTION
  776. mfspr r8, SPRN_DSCR
  777. ld r7, HSTATE_DSCR(r13)
  778. std r8, VCPU_DSCR(r7)
  779. mtspr SPRN_DSCR, r7
  780. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  781. /* Save non-volatile GPRs */
  782. std r14, VCPU_GPR(r14)(r9)
  783. std r15, VCPU_GPR(r15)(r9)
  784. std r16, VCPU_GPR(r16)(r9)
  785. std r17, VCPU_GPR(r17)(r9)
  786. std r18, VCPU_GPR(r18)(r9)
  787. std r19, VCPU_GPR(r19)(r9)
  788. std r20, VCPU_GPR(r20)(r9)
  789. std r21, VCPU_GPR(r21)(r9)
  790. std r22, VCPU_GPR(r22)(r9)
  791. std r23, VCPU_GPR(r23)(r9)
  792. std r24, VCPU_GPR(r24)(r9)
  793. std r25, VCPU_GPR(r25)(r9)
  794. std r26, VCPU_GPR(r26)(r9)
  795. std r27, VCPU_GPR(r27)(r9)
  796. std r28, VCPU_GPR(r28)(r9)
  797. std r29, VCPU_GPR(r29)(r9)
  798. std r30, VCPU_GPR(r30)(r9)
  799. std r31, VCPU_GPR(r31)(r9)
  800. /* Save SPRGs */
  801. mfspr r3, SPRN_SPRG0
  802. mfspr r4, SPRN_SPRG1
  803. mfspr r5, SPRN_SPRG2
  804. mfspr r6, SPRN_SPRG3
  805. std r3, VCPU_SPRG0(r9)
  806. std r4, VCPU_SPRG1(r9)
  807. std r5, VCPU_SPRG2(r9)
  808. std r6, VCPU_SPRG3(r9)
  809. /* Increment yield count if they have a VPA */
  810. ld r8, VCPU_VPA(r9) /* do they have a VPA? */
  811. cmpdi r8, 0
  812. beq 25f
  813. lwz r3, LPPACA_YIELDCOUNT(r8)
  814. addi r3, r3, 1
  815. stw r3, LPPACA_YIELDCOUNT(r8)
  816. 25:
  817. /* Save PMU registers if requested */
  818. /* r8 and cr0.eq are live here */
  819. li r3, 1
  820. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  821. mfspr r4, SPRN_MMCR0 /* save MMCR0 */
  822. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  823. isync
  824. beq 21f /* if no VPA, save PMU stuff anyway */
  825. lbz r7, LPPACA_PMCINUSE(r8)
  826. cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
  827. bne 21f
  828. std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
  829. b 22f
  830. 21: mfspr r5, SPRN_MMCR1
  831. mfspr r6, SPRN_MMCRA
  832. std r4, VCPU_MMCR(r9)
  833. std r5, VCPU_MMCR + 8(r9)
  834. std r6, VCPU_MMCR + 16(r9)
  835. mfspr r3, SPRN_PMC1
  836. mfspr r4, SPRN_PMC2
  837. mfspr r5, SPRN_PMC3
  838. mfspr r6, SPRN_PMC4
  839. mfspr r7, SPRN_PMC5
  840. mfspr r8, SPRN_PMC6
  841. BEGIN_FTR_SECTION
  842. mfspr r10, SPRN_PMC7
  843. mfspr r11, SPRN_PMC8
  844. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  845. stw r3, VCPU_PMC(r9)
  846. stw r4, VCPU_PMC + 4(r9)
  847. stw r5, VCPU_PMC + 8(r9)
  848. stw r6, VCPU_PMC + 12(r9)
  849. stw r7, VCPU_PMC + 16(r9)
  850. stw r8, VCPU_PMC + 20(r9)
  851. BEGIN_FTR_SECTION
  852. stw r10, VCPU_PMC + 24(r9)
  853. stw r11, VCPU_PMC + 28(r9)
  854. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  855. 22:
  856. /* save FP state */
  857. mr r3, r9
  858. bl .kvmppc_save_fp
  859. /* Secondary threads go off to take a nap on POWER7 */
  860. BEGIN_FTR_SECTION
  861. lwz r0,VCPU_PTID(r3)
  862. cmpwi r0,0
  863. bne secondary_nap
  864. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  865. /*
  866. * Reload DEC. HDEC interrupts were disabled when
  867. * we reloaded the host's LPCR value.
  868. */
  869. ld r3, HSTATE_DECEXP(r13)
  870. mftb r4
  871. subf r4, r4, r3
  872. mtspr SPRN_DEC, r4
  873. /* Reload the host's PMU registers */
  874. ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
  875. lbz r4, LPPACA_PMCINUSE(r3)
  876. cmpwi r4, 0
  877. beq 23f /* skip if not */
  878. lwz r3, HSTATE_PMC(r13)
  879. lwz r4, HSTATE_PMC + 4(r13)
  880. lwz r5, HSTATE_PMC + 8(r13)
  881. lwz r6, HSTATE_PMC + 12(r13)
  882. lwz r8, HSTATE_PMC + 16(r13)
  883. lwz r9, HSTATE_PMC + 20(r13)
  884. BEGIN_FTR_SECTION
  885. lwz r10, HSTATE_PMC + 24(r13)
  886. lwz r11, HSTATE_PMC + 28(r13)
  887. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  888. mtspr SPRN_PMC1, r3
  889. mtspr SPRN_PMC2, r4
  890. mtspr SPRN_PMC3, r5
  891. mtspr SPRN_PMC4, r6
  892. mtspr SPRN_PMC5, r8
  893. mtspr SPRN_PMC6, r9
  894. BEGIN_FTR_SECTION
  895. mtspr SPRN_PMC7, r10
  896. mtspr SPRN_PMC8, r11
  897. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  898. ld r3, HSTATE_MMCR(r13)
  899. ld r4, HSTATE_MMCR + 8(r13)
  900. ld r5, HSTATE_MMCR + 16(r13)
  901. mtspr SPRN_MMCR1, r4
  902. mtspr SPRN_MMCRA, r5
  903. mtspr SPRN_MMCR0, r3
  904. isync
  905. 23:
  906. /*
  907. * For external and machine check interrupts, we need
  908. * to call the Linux handler to process the interrupt.
  909. * We do that by jumping to the interrupt vector address
  910. * which we have in r12. The [h]rfid at the end of the
  911. * handler will return to the book3s_hv_interrupts.S code.
  912. * For other interrupts we do the rfid to get back
  913. * to the book3s_interrupts.S code here.
  914. */
  915. ld r8, HSTATE_VMHANDLER(r13)
  916. ld r7, HSTATE_HOST_MSR(r13)
  917. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  918. beq 11f
  919. cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  920. /* RFI into the highmem handler, or branch to interrupt handler */
  921. 12: mfmsr r6
  922. mtctr r12
  923. li r0, MSR_RI
  924. andc r6, r6, r0
  925. mtmsrd r6, 1 /* Clear RI in MSR */
  926. mtsrr0 r8
  927. mtsrr1 r7
  928. beqctr
  929. RFI
  930. 11:
  931. BEGIN_FTR_SECTION
  932. b 12b
  933. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  934. mtspr SPRN_HSRR0, r8
  935. mtspr SPRN_HSRR1, r7
  936. ba 0x500
  937. 6: mfspr r6,SPRN_HDAR
  938. mfspr r7,SPRN_HDSISR
  939. b 7b
  940. /*
  941. * Try to handle an hcall in real mode.
  942. * Returns to the guest if we handle it, or continues on up to
  943. * the kernel if we can't (i.e. if we don't have a handler for
  944. * it, or if the handler returns H_TOO_HARD).
  945. */
  946. .globl hcall_try_real_mode
  947. hcall_try_real_mode:
  948. ld r3,VCPU_GPR(r3)(r9)
  949. andi. r0,r11,MSR_PR
  950. bne hcall_real_cont
  951. clrrdi r3,r3,2
  952. cmpldi r3,hcall_real_table_end - hcall_real_table
  953. bge hcall_real_cont
  954. LOAD_REG_ADDR(r4, hcall_real_table)
  955. lwzx r3,r3,r4
  956. cmpwi r3,0
  957. beq hcall_real_cont
  958. add r3,r3,r4
  959. mtctr r3
  960. mr r3,r9 /* get vcpu pointer */
  961. ld r4,VCPU_GPR(r4)(r9)
  962. bctrl
  963. cmpdi r3,H_TOO_HARD
  964. beq hcall_real_fallback
  965. ld r4,HSTATE_KVM_VCPU(r13)
  966. std r3,VCPU_GPR(r3)(r4)
  967. ld r10,VCPU_PC(r4)
  968. ld r11,VCPU_MSR(r4)
  969. b fast_guest_return
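/*
 * hcall_try_real_mode dispatches through hcall_real_table below: one
 * 32-bit offset (relative to the table) per hcall number, with zero
 * meaning "not handled in real mode".  Hypercalls issued from guest
 * userspace (MSR_PR set) and handlers returning H_TOO_HARD fall back to
 * the normal kernel path.  Illustrative C-like sketch (informal names):
 *
 *	if ((guest_msr & MSR_PR) || req >= table_size || !table[req / 4])
 *		goto hcall_real_cont;
 *	ret = handler(vcpu, vcpu_gpr4);		// table base + offset
 *	if (ret == H_TOO_HARD)
 *		goto hcall_real_fallback;
 *	vcpu_gpr3 = ret;			// then resume the guest
 */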
  970. /* We've attempted a real mode hcall, but it's punted it back
  971. * to userspace. We need to restore some clobbered volatiles
  972. * before resuming the pass-it-to-qemu path */
  973. hcall_real_fallback:
  974. li r12,BOOK3S_INTERRUPT_SYSCALL
  975. ld r9, HSTATE_KVM_VCPU(r13)
  976. ld r11, VCPU_MSR(r9)
  977. b hcall_real_cont
  978. .globl hcall_real_table
  979. hcall_real_table:
  980. .long 0 /* 0 - unused */
  981. .long .kvmppc_h_remove - hcall_real_table
  982. .long .kvmppc_h_enter - hcall_real_table
  983. .long .kvmppc_h_read - hcall_real_table
  984. .long 0 /* 0x10 - H_CLEAR_MOD */
  985. .long 0 /* 0x14 - H_CLEAR_REF */
  986. .long .kvmppc_h_protect - hcall_real_table
  987. .long 0 /* 0x1c - H_GET_TCE */
  988. .long .kvmppc_h_put_tce - hcall_real_table
  989. .long 0 /* 0x24 - H_SET_SPRG0 */
  990. .long .kvmppc_h_set_dabr - hcall_real_table
  991. .long 0 /* 0x2c */
  992. .long 0 /* 0x30 */
  993. .long 0 /* 0x34 */
  994. .long 0 /* 0x38 */
  995. .long 0 /* 0x3c */
  996. .long 0 /* 0x40 */
  997. .long 0 /* 0x44 */
  998. .long 0 /* 0x48 */
  999. .long 0 /* 0x4c */
  1000. .long 0 /* 0x50 */
  1001. .long 0 /* 0x54 */
  1002. .long 0 /* 0x58 */
  1003. .long 0 /* 0x5c */
  1004. .long 0 /* 0x60 */
  1005. .long 0 /* 0x64 */
  1006. .long 0 /* 0x68 */
  1007. .long 0 /* 0x6c */
  1008. .long 0 /* 0x70 */
  1009. .long 0 /* 0x74 */
  1010. .long 0 /* 0x78 */
  1011. .long 0 /* 0x7c */
  1012. .long 0 /* 0x80 */
  1013. .long 0 /* 0x84 */
  1014. .long 0 /* 0x88 */
  1015. .long 0 /* 0x8c */
  1016. .long 0 /* 0x90 */
  1017. .long 0 /* 0x94 */
  1018. .long 0 /* 0x98 */
  1019. .long 0 /* 0x9c */
  1020. .long 0 /* 0xa0 */
  1021. .long 0 /* 0xa4 */
  1022. .long 0 /* 0xa8 */
  1023. .long 0 /* 0xac */
  1024. .long 0 /* 0xb0 */
  1025. .long 0 /* 0xb4 */
  1026. .long 0 /* 0xb8 */
  1027. .long 0 /* 0xbc */
  1028. .long 0 /* 0xc0 */
  1029. .long 0 /* 0xc4 */
  1030. .long 0 /* 0xc8 */
  1031. .long 0 /* 0xcc */
  1032. .long 0 /* 0xd0 */
  1033. .long 0 /* 0xd4 */
  1034. .long 0 /* 0xd8 */
  1035. .long 0 /* 0xdc */
  1036. .long 0 /* 0xe0 */
  1037. .long 0 /* 0xe4 */
  1038. .long 0 /* 0xe8 */
  1039. .long 0 /* 0xec */
  1040. .long 0 /* 0xf0 */
  1041. .long 0 /* 0xf4 */
  1042. .long 0 /* 0xf8 */
  1043. .long 0 /* 0xfc */
  1044. .long 0 /* 0x100 */
  1045. .long 0 /* 0x104 */
  1046. .long 0 /* 0x108 */
  1047. .long 0 /* 0x10c */
  1048. .long 0 /* 0x110 */
  1049. .long 0 /* 0x114 */
  1050. .long 0 /* 0x118 */
  1051. .long 0 /* 0x11c */
  1052. .long 0 /* 0x120 */
  1053. .long .kvmppc_h_bulk_remove - hcall_real_table
  1054. hcall_real_table_end:
  1055. ignore_hdec:
  1056. mr r4,r9
  1057. b fast_guest_return
  1058. bounce_ext_interrupt:
  1059. mr r4,r9
  1060. mtspr SPRN_SRR0,r10
  1061. mtspr SPRN_SRR1,r11
  1062. li r10,BOOK3S_INTERRUPT_EXTERNAL
  1063. LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
  1064. b fast_guest_return
  1065. _GLOBAL(kvmppc_h_set_dabr)
  1066. std r4,VCPU_DABR(r3)
  1067. mtspr SPRN_DABR,r4
  1068. li r3,0
  1069. blr
  1070. secondary_too_late:
  1071. ld r5,HSTATE_KVM_VCORE(r13)
  1072. HMT_LOW
  1073. 13: lbz r3,VCORE_IN_GUEST(r5)
  1074. cmpwi r3,0
  1075. bne 13b
  1076. HMT_MEDIUM
  1077. ld r11,PACA_SLBSHADOWPTR(r13)
  1078. .rept SLB_NUM_BOLTED
  1079. ld r5,SLBSHADOW_SAVEAREA(r11)
  1080. ld r6,SLBSHADOW_SAVEAREA+8(r11)
  1081. andis. r7,r5,SLB_ESID_V@h
  1082. beq 1f
  1083. slbmte r6,r5
  1084. 1: addi r11,r11,16
  1085. .endr
  1086. b 50f
  1087. secondary_nap:
  1088. /* Clear any pending IPI */
  1089. 50: ld r5, HSTATE_XICS_PHYS(r13)
  1090. li r0, 0xff
  1091. li r6, XICS_QIRR
  1092. stbcix r0, r5, r6
  1093. /* increment the nap count and then go to nap mode */
  1094. ld r4, HSTATE_KVM_VCORE(r13)
  1095. addi r4, r4, VCORE_NAP_COUNT
  1096. lwsync /* make previous updates visible */
  1097. 51: lwarx r3, 0, r4
  1098. addi r3, r3, 1
  1099. stwcx. r3, 0, r4
  1100. bne 51b
  1101. isync
  1102. mfspr r4, SPRN_LPCR
  1103. li r0, LPCR_PECE
  1104. andc r4, r4, r0
  1105. ori r4, r4, LPCR_PECE0 /* exit nap on interrupt */
  1106. mtspr SPRN_LPCR, r4
  1107. li r0, 0
  1108. std r0, HSTATE_SCRATCH0(r13)
  1109. ptesync
  1110. ld r0, HSTATE_SCRATCH0(r13)
  1111. 1: cmpd r0, r0
  1112. bne 1b
  1113. nap
  1114. b .
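/*
 * secondary_nap: the secondary thread clears any pending IPI via the
 * XICS QIRR, atomically bumps vcore->nap_count so the host side can see
 * when all secondaries are idle, restricts LPCR[PECE] so that only an
 * interrupt will wake it, and then naps.  When the primary kicks it with
 * another IPI it starts again at kvm_start_guest.
 */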
  1115. /*
  1116. * Save away FP, VMX and VSX registers.
  1117. * r3 = vcpu pointer
  1118. */
  1119. _GLOBAL(kvmppc_save_fp)
  1120. mfmsr r9
  1121. ori r8,r9,MSR_FP
  1122. #ifdef CONFIG_ALTIVEC
  1123. BEGIN_FTR_SECTION
  1124. oris r8,r8,MSR_VEC@h
  1125. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  1126. #endif
  1127. #ifdef CONFIG_VSX
  1128. BEGIN_FTR_SECTION
  1129. oris r8,r8,MSR_VSX@h
  1130. END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  1131. #endif
  1132. mtmsrd r8
  1133. isync
  1134. #ifdef CONFIG_VSX
  1135. BEGIN_FTR_SECTION
  1136. reg = 0
  1137. .rept 32
  1138. li r6,reg*16+VCPU_VSRS
  1139. stxvd2x reg,r6,r3
  1140. reg = reg + 1
  1141. .endr
  1142. FTR_SECTION_ELSE
  1143. #endif
  1144. reg = 0
  1145. .rept 32
  1146. stfd reg,reg*8+VCPU_FPRS(r3)
  1147. reg = reg + 1
  1148. .endr
  1149. #ifdef CONFIG_VSX
  1150. ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
  1151. #endif
  1152. mffs fr0
  1153. stfd fr0,VCPU_FPSCR(r3)
  1154. #ifdef CONFIG_ALTIVEC
  1155. BEGIN_FTR_SECTION
  1156. reg = 0
  1157. .rept 32
  1158. li r6,reg*16+VCPU_VRS
  1159. stvx reg,r6,r3
  1160. reg = reg + 1
  1161. .endr
  1162. mfvscr vr0
  1163. li r6,VCPU_VSCR
  1164. stvx vr0,r6,r3
  1165. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  1166. #endif
  1167. mfspr r6,SPRN_VRSAVE
  1168. stw r6,VCPU_VRSAVE(r3)
  1169. mtmsrd r9
  1170. isync
  1171. blr
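/*
 * Note that the .rept/.endr blocks in kvmppc_save_fp are unrolled at
 * assembly time: "reg" is an assembler symbol rather than a register,
 * so each iteration emits the store for the next FP/VSX/VMX register at
 * its reg*8 or reg*16 offset in the vcpu struct.
 */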
  1172. /*
  1173. * Load up FP, VMX and VSX registers
  1174. * r4 = vcpu pointer
  1175. */
  1176. .globl kvmppc_load_fp
  1177. kvmppc_load_fp:
  1178. mfmsr r9
  1179. ori r8,r9,MSR_FP
  1180. #ifdef CONFIG_ALTIVEC
  1181. BEGIN_FTR_SECTION
  1182. oris r8,r8,MSR_VEC@h
  1183. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  1184. #endif
  1185. #ifdef CONFIG_VSX
  1186. BEGIN_FTR_SECTION
  1187. oris r8,r8,MSR_VSX@h
  1188. END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  1189. #endif
  1190. mtmsrd r8
  1191. isync
  1192. lfd fr0,VCPU_FPSCR(r4)
  1193. MTFSF_L(fr0)
  1194. #ifdef CONFIG_VSX
  1195. BEGIN_FTR_SECTION
  1196. reg = 0
  1197. .rept 32
  1198. li r7,reg*16+VCPU_VSRS
  1199. lxvd2x reg,r7,r4
  1200. reg = reg + 1
  1201. .endr
  1202. FTR_SECTION_ELSE
  1203. #endif
  1204. reg = 0
  1205. .rept 32
  1206. lfd reg,reg*8+VCPU_FPRS(r4)
  1207. reg = reg + 1
  1208. .endr
  1209. #ifdef CONFIG_VSX
  1210. ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
  1211. #endif
  1212. #ifdef CONFIG_ALTIVEC
  1213. BEGIN_FTR_SECTION
  1214. li r7,VCPU_VSCR
  1215. lvx vr0,r7,r4
  1216. mtvscr vr0
  1217. reg = 0
  1218. .rept 32
  1219. li r7,reg*16+VCPU_VRS
  1220. lvx reg,r7,r4
  1221. reg = reg + 1
  1222. .endr
  1223. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  1224. #endif
  1225. lwz r7,VCPU_VRSAVE(r4)
  1226. mtspr SPRN_VRSAVE,r7
  1227. blr