book3s_hv_rmhandlers.S

  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  12. *
  13. * Derived from book3s_rmhandlers.S and other files, which are:
  14. *
  15. * Copyright SUSE Linux Products GmbH 2009
  16. *
  17. * Authors: Alexander Graf <agraf@suse.de>
  18. */
  19. #include <asm/ppc_asm.h>
  20. #include <asm/kvm_asm.h>
  21. #include <asm/reg.h>
  22. #include <asm/mmu.h>
  23. #include <asm/page.h>
  24. #include <asm/ptrace.h>
  25. #include <asm/hvcall.h>
  26. #include <asm/asm-offsets.h>
  27. #include <asm/exception-64s.h>
  28. #include <asm/kvm_book3s_asm.h>
  29. #include <asm/mmu-hash64.h>
  30. #ifdef __LITTLE_ENDIAN__
  31. #error Need to fix lppaca and SLB shadow accesses in little endian mode
  32. #endif
  33. /*
  34. * Call kvmppc_hv_entry in real mode.
  35. * Must be called with interrupts hard-disabled.
  36. *
  37. * Input Registers:
  38. *
  39. * LR = return address to continue at after eventually re-enabling MMU
  40. */
  41. _GLOBAL(kvmppc_hv_entry_trampoline)
  42. mflr r0
  43. std r0, PPC_LR_STKOFF(r1)
  44. stdu r1, -112(r1)
  45. mfmsr r10
  46. LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
  47. li r0,MSR_RI
  48. andc r0,r10,r0
  49. li r6,MSR_IR | MSR_DR
  50. andc r6,r10,r6
  51. mtmsrd r0,1 /* clear RI in MSR */
  52. mtsrr0 r5
  53. mtsrr1 r6
  54. RFI
  55. kvmppc_call_hv_entry:
  56. bl kvmppc_hv_entry
  57. /* Back from guest - restore host state and return to caller */
  58. /* Restore host DABR and DABRX */
  59. ld r5,HSTATE_DABR(r13)
  60. li r6,7
  61. mtspr SPRN_DABR,r5
  62. mtspr SPRN_DABRX,r6
  63. /* Restore SPRG3 */
  64. ld r3,PACA_SPRG3(r13)
  65. mtspr SPRN_SPRG3,r3
  66. /*
  67. * Reload DEC. HDEC interrupts were disabled when
  68. * we reloaded the host's LPCR value.
  69. */
  70. ld r3, HSTATE_DECEXP(r13)
  71. mftb r4
  72. subf r4, r4, r3
  73. mtspr SPRN_DEC, r4
  74. /* Reload the host's PMU registers */
  75. ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
  76. lbz r4, LPPACA_PMCINUSE(r3)
  77. cmpwi r4, 0
  78. beq 23f /* skip if not */
  79. lwz r3, HSTATE_PMC(r13)
  80. lwz r4, HSTATE_PMC + 4(r13)
  81. lwz r5, HSTATE_PMC + 8(r13)
  82. lwz r6, HSTATE_PMC + 12(r13)
  83. lwz r8, HSTATE_PMC + 16(r13)
  84. lwz r9, HSTATE_PMC + 20(r13)
  85. BEGIN_FTR_SECTION
  86. lwz r10, HSTATE_PMC + 24(r13)
  87. lwz r11, HSTATE_PMC + 28(r13)
  88. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  89. mtspr SPRN_PMC1, r3
  90. mtspr SPRN_PMC2, r4
  91. mtspr SPRN_PMC3, r5
  92. mtspr SPRN_PMC4, r6
  93. mtspr SPRN_PMC5, r8
  94. mtspr SPRN_PMC6, r9
  95. BEGIN_FTR_SECTION
  96. mtspr SPRN_PMC7, r10
  97. mtspr SPRN_PMC8, r11
  98. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  99. ld r3, HSTATE_MMCR(r13)
  100. ld r4, HSTATE_MMCR + 8(r13)
  101. ld r5, HSTATE_MMCR + 16(r13)
  102. mtspr SPRN_MMCR1, r4
  103. mtspr SPRN_MMCRA, r5
  104. mtspr SPRN_MMCR0, r3
  105. isync
  106. 23:
  107. /*
  108. * For external and machine check interrupts, we need
  109. * to call the Linux handler to process the interrupt.
  110. * We do that by jumping to absolute address 0x500 for
  111. * external interrupts, or the machine_check_fwnmi label
  112. * for machine checks (since firmware might have patched
  113. * the vector area at 0x200). The [h]rfid at the end of the
  114. * handler will return to the book3s_hv_interrupts.S code.
  115. * For other interrupts we do the rfid to get back
  116. * to the book3s_hv_interrupts.S code here.
  117. */
  118. ld r8, 112+PPC_LR_STKOFF(r1)
  119. addi r1, r1, 112
  120. ld r7, HSTATE_HOST_MSR(r13)
  121. cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  122. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  123. BEGIN_FTR_SECTION
  124. beq 11f
  125. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  126. /* RFI into the highmem handler, or branch to interrupt handler */
  127. mfmsr r6
  128. li r0, MSR_RI
  129. andc r6, r6, r0
  130. mtmsrd r6, 1 /* Clear RI in MSR */
  131. mtsrr0 r8
  132. mtsrr1 r7
  133. beqa 0x500 /* external interrupt (PPC970) */
  134. beq cr1, 13f /* machine check */
  135. RFI
  136. /* On POWER7, we have external interrupts set to use HSRR0/1 */
  137. 11: mtspr SPRN_HSRR0, r8
  138. mtspr SPRN_HSRR1, r7
  139. ba 0x500
  140. 13: b machine_check_fwnmi
  141. /*
  142. * We come in here when wakened from nap mode on a secondary hw thread.
  143. * Relocation is off and most register values are lost.
  144. * r13 points to the PACA.
  145. */
  146. .globl kvm_start_guest
  147. kvm_start_guest:
  148. ld r1,PACAEMERGSP(r13)
  149. subi r1,r1,STACK_FRAME_OVERHEAD
  150. ld r2,PACATOC(r13)
  151. li r0,KVM_HWTHREAD_IN_KVM
  152. stb r0,HSTATE_HWTHREAD_STATE(r13)
  153. /* NV GPR values from power7_idle() will no longer be valid */
  154. li r0,1
  155. stb r0,PACA_NAPSTATELOST(r13)
  156. /* were we napping due to cede? */
  157. lbz r0,HSTATE_NAPPING(r13)
  158. cmpwi r0,0
  159. bne kvm_end_cede
  160. /*
  161. * We weren't napping due to cede, so this must be a secondary
  162. * thread being woken up to run a guest, or being woken up due
  163. * to a stray IPI. (Or due to some machine check or hypervisor
  164. * maintenance interrupt while the core is in KVM.)
  165. */
  166. /* Check the wake reason in SRR1 to see why we got here */
  167. mfspr r3,SPRN_SRR1
  168. rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
  169. cmpwi r3,4 /* was it an external interrupt? */
  170. bne 27f /* if not */
  171. ld r5,HSTATE_XICS_PHYS(r13)
  172. li r7,XICS_XIRR /* if it was an external interrupt, */
  173. lwzcix r8,r5,r7 /* get and ack the interrupt */
  174. sync
  175. clrldi. r9,r8,40 /* get interrupt source ID. */
  176. beq 28f /* none there? */
  177. cmpwi r9,XICS_IPI /* was it an IPI? */
  178. bne 29f
  179. li r0,0xff
  180. li r6,XICS_MFRR
  181. stbcix r0,r5,r6 /* clear IPI */
  182. stwcix r8,r5,r7 /* EOI the interrupt */
  183. sync /* order loading of vcpu after that */
  184. /* get vcpu pointer, NULL if we have no vcpu to run */
  185. ld r4,HSTATE_KVM_VCPU(r13)
  186. cmpdi r4,0
  187. /* if we have no vcpu to run, go back to sleep */
  188. beq kvm_no_guest
  189. b 30f
  190. 27: /* XXX should handle hypervisor maintenance interrupts etc. here */
  191. b kvm_no_guest
  192. 28: /* SRR1 said external but ICP said nope?? */
  193. b kvm_no_guest
  194. 29: /* External non-IPI interrupt to offline secondary thread? help?? */
  195. stw r8,HSTATE_SAVED_XIRR(r13)
  196. b kvm_no_guest
  197. 30: bl kvmppc_hv_entry
  198. /* Back from the guest, go back to nap */
  199. /* Clear our vcpu pointer so we don't come back in early */
  200. li r0, 0
  201. std r0, HSTATE_KVM_VCPU(r13)
  202. lwsync
  203. /* Clear any pending IPI - we're an offline thread */
  204. ld r5, HSTATE_XICS_PHYS(r13)
  205. li r7, XICS_XIRR
  206. lwzcix r3, r5, r7 /* ack any pending interrupt */
  207. rlwinm. r0, r3, 0, 0xffffff /* any pending? */
  208. beq 37f
  209. sync
  210. li r0, 0xff
  211. li r6, XICS_MFRR
  212. stbcix r0, r5, r6 /* clear the IPI */
  213. stwcix r3, r5, r7 /* EOI it */
  214. 37: sync
  215. /* increment the nap count and then go to nap mode */
  216. ld r4, HSTATE_KVM_VCORE(r13)
  217. addi r4, r4, VCORE_NAP_COUNT
  218. lwsync /* make previous updates visible */
  219. 51: lwarx r3, 0, r4
  220. addi r3, r3, 1
  221. stwcx. r3, 0, r4
  222. bne 51b
  223. kvm_no_guest:
  224. li r0, KVM_HWTHREAD_IN_NAP
  225. stb r0, HSTATE_HWTHREAD_STATE(r13)
  226. li r3, LPCR_PECE0
  227. mfspr r4, SPRN_LPCR
  228. rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
  229. mtspr SPRN_LPCR, r4
  230. isync
  231. std r0, HSTATE_SCRATCH0(r13)
  232. ptesync
  233. ld r0, HSTATE_SCRATCH0(r13)
  234. 1: cmpd r0, r0
  235. bne 1b
  236. nap
  237. b .
  238. /******************************************************************************
  239. * *
  240. * Entry code *
  241. * *
  242. *****************************************************************************/
  243. .global kvmppc_hv_entry
  244. kvmppc_hv_entry:
  245. /* Required state:
  246. *
  247. * R4 = vcpu pointer
  248. * MSR = ~IR|DR
  249. * R13 = PACA
  250. * R1 = host R1
  251. * all other volatile GPRS = free
  252. */
  253. mflr r0
  254. std r0, PPC_LR_STKOFF(r1)
  255. stdu r1, -112(r1)
  256. /* Set partition DABR */
  257. /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
  258. li r5,3
  259. ld r6,VCPU_DABR(r4)
  260. mtspr SPRN_DABRX,r5
  261. mtspr SPRN_DABR,r6
  262. BEGIN_FTR_SECTION
  263. isync
  264. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  265. /* Load guest PMU registers */
  266. /* R4 is live here (vcpu pointer) */
  267. li r3, 1
  268. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  269. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  270. isync
  271. lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
  272. lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
  273. lwz r6, VCPU_PMC + 8(r4)
  274. lwz r7, VCPU_PMC + 12(r4)
  275. lwz r8, VCPU_PMC + 16(r4)
  276. lwz r9, VCPU_PMC + 20(r4)
  277. BEGIN_FTR_SECTION
  278. lwz r10, VCPU_PMC + 24(r4)
  279. lwz r11, VCPU_PMC + 28(r4)
  280. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  281. mtspr SPRN_PMC1, r3
  282. mtspr SPRN_PMC2, r5
  283. mtspr SPRN_PMC3, r6
  284. mtspr SPRN_PMC4, r7
  285. mtspr SPRN_PMC5, r8
  286. mtspr SPRN_PMC6, r9
  287. BEGIN_FTR_SECTION
  288. mtspr SPRN_PMC7, r10
  289. mtspr SPRN_PMC8, r11
  290. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  291. ld r3, VCPU_MMCR(r4)
  292. ld r5, VCPU_MMCR + 8(r4)
  293. ld r6, VCPU_MMCR + 16(r4)
  294. ld r7, VCPU_SIAR(r4)
  295. ld r8, VCPU_SDAR(r4)
  296. mtspr SPRN_MMCR1, r5
  297. mtspr SPRN_MMCRA, r6
  298. mtspr SPRN_SIAR, r7
  299. mtspr SPRN_SDAR, r8
  300. mtspr SPRN_MMCR0, r3
  301. isync
  302. /* Load up FP, VMX and VSX registers */
  303. bl kvmppc_load_fp
  304. ld r14, VCPU_GPR(R14)(r4)
  305. ld r15, VCPU_GPR(R15)(r4)
  306. ld r16, VCPU_GPR(R16)(r4)
  307. ld r17, VCPU_GPR(R17)(r4)
  308. ld r18, VCPU_GPR(R18)(r4)
  309. ld r19, VCPU_GPR(R19)(r4)
  310. ld r20, VCPU_GPR(R20)(r4)
  311. ld r21, VCPU_GPR(R21)(r4)
  312. ld r22, VCPU_GPR(R22)(r4)
  313. ld r23, VCPU_GPR(R23)(r4)
  314. ld r24, VCPU_GPR(R24)(r4)
  315. ld r25, VCPU_GPR(R25)(r4)
  316. ld r26, VCPU_GPR(R26)(r4)
  317. ld r27, VCPU_GPR(R27)(r4)
  318. ld r28, VCPU_GPR(R28)(r4)
  319. ld r29, VCPU_GPR(R29)(r4)
  320. ld r30, VCPU_GPR(R30)(r4)
  321. ld r31, VCPU_GPR(R31)(r4)
  322. BEGIN_FTR_SECTION
  323. /* Switch DSCR to guest value */
  324. ld r5, VCPU_DSCR(r4)
  325. mtspr SPRN_DSCR, r5
  326. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  327. /*
  328. * Set the decrementer to the guest decrementer.
  329. */
  330. ld r8,VCPU_DEC_EXPIRES(r4)
  331. mftb r7
  332. subf r3,r7,r8
  333. mtspr SPRN_DEC,r3
  334. stw r3,VCPU_DEC(r4)
  335. ld r5, VCPU_SPRG0(r4)
  336. ld r6, VCPU_SPRG1(r4)
  337. ld r7, VCPU_SPRG2(r4)
  338. ld r8, VCPU_SPRG3(r4)
  339. mtspr SPRN_SPRG0, r5
  340. mtspr SPRN_SPRG1, r6
  341. mtspr SPRN_SPRG2, r7
  342. mtspr SPRN_SPRG3, r8
  343. /* Save R1 in the PACA */
  344. std r1, HSTATE_HOST_R1(r13)
  345. /* Load up DAR and DSISR */
  346. ld r5, VCPU_DAR(r4)
  347. lwz r6, VCPU_DSISR(r4)
  348. mtspr SPRN_DAR, r5
  349. mtspr SPRN_DSISR, r6
  350. li r6, KVM_GUEST_MODE_HOST_HV
  351. stb r6, HSTATE_IN_GUEST(r13)
  352. BEGIN_FTR_SECTION
  353. /* Restore AMR and UAMOR, set AMOR to all 1s */
  354. ld r5,VCPU_AMR(r4)
  355. ld r6,VCPU_UAMOR(r4)
  356. li r7,-1
  357. mtspr SPRN_AMR,r5
  358. mtspr SPRN_UAMOR,r6
  359. mtspr SPRN_AMOR,r7
  360. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  361. /* Clear out SLB */
  362. li r6,0
  363. slbmte r6,r6
  364. slbia
  365. ptesync
  366. BEGIN_FTR_SECTION
  367. b 30f
  368. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  369. /*
  370. * POWER7 host -> guest partition switch code.
  371. * We don't have to lock against concurrent tlbies,
  372. * but we do have to coordinate across hardware threads.
  373. */
  374. /* Increment entry count iff exit count is zero. */
  375. ld r5,HSTATE_KVM_VCORE(r13)
  376. addi r9,r5,VCORE_ENTRY_EXIT
  377. 21: lwarx r3,0,r9
  378. cmpwi r3,0x100 /* any threads starting to exit? */
  379. bge secondary_too_late /* if so we're too late to the party */
  380. addi r3,r3,1
  381. stwcx. r3,0,r9
  382. bne 21b
  383. /* Primary thread switches to guest partition. */
  384. ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
  385. lwz r6,VCPU_PTID(r4)
  386. cmpwi r6,0
  387. bne 20f
  388. ld r6,KVM_SDR1(r9)
  389. lwz r7,KVM_LPID(r9)
  390. li r0,LPID_RSVD /* switch to reserved LPID */
  391. mtspr SPRN_LPID,r0
  392. ptesync
  393. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  394. mtspr SPRN_LPID,r7
  395. isync
  396. /* See if we need to flush the TLB */
  397. lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
  398. clrldi r7,r6,64-6 /* extract bit number (6 bits) */
  399. srdi r6,r6,6 /* doubleword number */
  400. sldi r6,r6,3 /* address offset */
  401. add r6,r6,r9
  402. addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
  403. li r0,1
  404. sld r0,r0,r7
  405. ld r7,0(r6)
  406. and. r7,r7,r0
  407. beq 22f
  408. 23: ldarx r7,0,r6 /* if set, clear the bit */
  409. andc r7,r7,r0
  410. stdcx. r7,0,r6
  411. bne 23b
  412. li r6,128 /* and flush the TLB */
  413. mtctr r6
  414. li r7,0x800 /* IS field = 0b10 */
  415. ptesync
  416. 28: tlbiel r7
  417. addi r7,r7,0x1000
  418. bdnz 28b
  419. ptesync
  420. /* Add timebase offset onto timebase */
  421. 22: ld r8,VCORE_TB_OFFSET(r5)
  422. cmpdi r8,0
  423. beq 37f
  424. mftb r6 /* current host timebase */
  425. add r8,r8,r6
  426. mtspr SPRN_TBU40,r8 /* update upper 40 bits */
  427. mftb r7 /* check if lower 24 bits overflowed */
  428. clrldi r6,r6,40
  429. clrldi r7,r7,40
  430. cmpld r7,r6
  431. bge 37f
  432. addis r8,r8,0x100 /* if so, increment upper 40 bits */
  433. mtspr SPRN_TBU40,r8
  434. /* Load guest PCR value to select appropriate compat mode */
  435. 37: ld r7, VCORE_PCR(r5)
  436. cmpdi r7, 0
  437. beq 38f
  438. mtspr SPRN_PCR, r7
  439. 38:
  440. li r0,1
  441. stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
  442. b 10f
  443. /* Secondary threads wait for primary to have done partition switch */
  444. 20: lbz r0,VCORE_IN_GUEST(r5)
  445. cmpwi r0,0
  446. beq 20b
  447. /* Set LPCR and RMOR. */
  448. 10: ld r8,VCORE_LPCR(r5)
  449. mtspr SPRN_LPCR,r8
  450. ld r8,KVM_RMOR(r9)
  451. mtspr SPRN_RMOR,r8
  452. isync
  453. /* Increment yield count if they have a VPA */
  454. ld r3, VCPU_VPA(r4)
  455. cmpdi r3, 0
  456. beq 25f
  457. lwz r5, LPPACA_YIELDCOUNT(r3)
  458. addi r5, r5, 1
  459. stw r5, LPPACA_YIELDCOUNT(r3)
  460. li r6, 1
  461. stb r6, VCPU_VPA_DIRTY(r4)
  462. 25:
  463. /* Check if HDEC expires soon */
  464. mfspr r3,SPRN_HDEC
  465. cmpwi r3,10
  466. li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  467. mr r9,r4
  468. blt hdec_soon
  469. /* Save purr/spurr */
  470. mfspr r5,SPRN_PURR
  471. mfspr r6,SPRN_SPURR
  472. std r5,HSTATE_PURR(r13)
  473. std r6,HSTATE_SPURR(r13)
  474. ld r7,VCPU_PURR(r4)
  475. ld r8,VCPU_SPURR(r4)
  476. mtspr SPRN_PURR,r7
  477. mtspr SPRN_SPURR,r8
  478. b 31f
  479. /*
  480. * PPC970 host -> guest partition switch code.
  481. * We have to lock against concurrent tlbies,
  482. * using native_tlbie_lock to lock against host tlbies
  483. * and kvm->arch.tlbie_lock to lock against guest tlbies.
  484. * We also have to invalidate the TLB since its
  485. * entries aren't tagged with the LPID.
  486. */
  487. 30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
  488. /* first take native_tlbie_lock */
  489. .section ".toc","aw"
  490. toc_tlbie_lock:
  491. .tc native_tlbie_lock[TC],native_tlbie_lock
  492. .previous
  493. ld r3,toc_tlbie_lock@toc(2)
  494. #ifdef __BIG_ENDIAN__
  495. lwz r8,PACA_LOCK_TOKEN(r13)
  496. #else
  497. lwz r8,PACAPACAINDEX(r13)
  498. #endif
  499. 24: lwarx r0,0,r3
  500. cmpwi r0,0
  501. bne 24b
  502. stwcx. r8,0,r3
  503. bne 24b
  504. isync
  505. ld r5,HSTATE_KVM_VCORE(r13)
  506. ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
  507. li r0,0x18f
  508. rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
  509. or r0,r7,r0
  510. ptesync
  511. sync
  512. mtspr SPRN_HID4,r0 /* switch to reserved LPID */
  513. isync
  514. li r0,0
  515. stw r0,0(r3) /* drop native_tlbie_lock */
  516. /* invalidate the whole TLB */
  517. li r0,256
  518. mtctr r0
  519. li r6,0
  520. 25: tlbiel r6
  521. addi r6,r6,0x1000
  522. bdnz 25b
  523. ptesync
  524. /* Take the guest's tlbie_lock */
  525. addi r3,r9,KVM_TLBIE_LOCK
  526. 24: lwarx r0,0,r3
  527. cmpwi r0,0
  528. bne 24b
  529. stwcx. r8,0,r3
  530. bne 24b
  531. isync
  532. ld r6,KVM_SDR1(r9)
  533. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  534. /* Set up HID4 with the guest's LPID etc. */
  535. sync
  536. mtspr SPRN_HID4,r7
  537. isync
  538. /* drop the guest's tlbie_lock */
  539. li r0,0
  540. stw r0,0(r3)
  541. /* Check if HDEC expires soon */
  542. mfspr r3,SPRN_HDEC
  543. cmpwi r3,10
  544. li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  545. mr r9,r4
  546. blt hdec_soon
  547. /* Enable HDEC interrupts */
  548. mfspr r0,SPRN_HID0
  549. li r3,1
  550. rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
  551. sync
  552. mtspr SPRN_HID0,r0
  553. mfspr r0,SPRN_HID0
  554. mfspr r0,SPRN_HID0
  555. mfspr r0,SPRN_HID0
  556. mfspr r0,SPRN_HID0
  557. mfspr r0,SPRN_HID0
  558. mfspr r0,SPRN_HID0
  559. /* Load up guest SLB entries */
  560. 31: lwz r5,VCPU_SLB_MAX(r4)
  561. cmpwi r5,0
  562. beq 9f
  563. mtctr r5
  564. addi r6,r4,VCPU_SLB
  565. 1: ld r8,VCPU_SLB_E(r6)
  566. ld r9,VCPU_SLB_V(r6)
  567. slbmte r9,r8
  568. addi r6,r6,VCPU_SLB_SIZE
  569. bdnz 1b
  570. 9:
  571. /* Restore state of CTRL run bit; assume 1 on entry */
  572. lwz r5,VCPU_CTRL(r4)
  573. andi. r5,r5,1
  574. bne 4f
  575. mfspr r6,SPRN_CTRLF
  576. clrrdi r6,r6,1
  577. mtspr SPRN_CTRLT,r6
  578. 4:
  579. ld r6, VCPU_CTR(r4)
  580. lwz r7, VCPU_XER(r4)
  581. mtctr r6
  582. mtxer r7
  583. ld r10, VCPU_PC(r4)
  584. ld r11, VCPU_MSR(r4)
  585. kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
  586. ld r6, VCPU_SRR0(r4)
  587. ld r7, VCPU_SRR1(r4)
  588. /* r11 = vcpu->arch.msr & ~MSR_HV */
  589. rldicl r11, r11, 63 - MSR_HV_LG, 1
  590. rotldi r11, r11, 1 + MSR_HV_LG
  591. ori r11, r11, MSR_ME
  592. /* Check if we can deliver an external or decrementer interrupt now */
  593. ld r0,VCPU_PENDING_EXC(r4)
  594. lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
  595. and r0,r0,r8
  596. cmpdi cr1,r0,0
  597. andi. r0,r11,MSR_EE
  598. beq cr1,11f
  599. BEGIN_FTR_SECTION
  600. mfspr r8,SPRN_LPCR
  601. ori r8,r8,LPCR_MER
  602. mtspr SPRN_LPCR,r8
  603. isync
  604. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  605. beq 5f
  606. li r0,BOOK3S_INTERRUPT_EXTERNAL
  607. 12: mr r6,r10
  608. mr r10,r0
  609. mr r7,r11
  610. li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
  611. rotldi r11,r11,63
  612. b 5f
  613. 11: beq 5f
  614. mfspr r0,SPRN_DEC
  615. cmpwi r0,0
  616. li r0,BOOK3S_INTERRUPT_DECREMENTER
  617. blt 12b
  618. /* Move SRR0 and SRR1 into the respective regs */
  619. 5: mtspr SPRN_SRR0, r6
  620. mtspr SPRN_SRR1, r7
  621. fast_guest_return:
  622. li r0,0
  623. stb r0,VCPU_CEDED(r4) /* cancel cede */
  624. mtspr SPRN_HSRR0,r10
  625. mtspr SPRN_HSRR1,r11
  626. /* Activate guest mode, so faults get handled by KVM */
  627. li r9, KVM_GUEST_MODE_GUEST_HV
  628. stb r9, HSTATE_IN_GUEST(r13)
  629. /* Enter guest */
  630. BEGIN_FTR_SECTION
  631. ld r5, VCPU_CFAR(r4)
  632. mtspr SPRN_CFAR, r5
  633. END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
  634. BEGIN_FTR_SECTION
  635. ld r0, VCPU_PPR(r4)
  636. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  637. ld r5, VCPU_LR(r4)
  638. lwz r6, VCPU_CR(r4)
  639. mtlr r5
  640. mtcr r6
  641. ld r1, VCPU_GPR(R1)(r4)
  642. ld r2, VCPU_GPR(R2)(r4)
  643. ld r3, VCPU_GPR(R3)(r4)
  644. ld r5, VCPU_GPR(R5)(r4)
  645. ld r6, VCPU_GPR(R6)(r4)
  646. ld r7, VCPU_GPR(R7)(r4)
  647. ld r8, VCPU_GPR(R8)(r4)
  648. ld r9, VCPU_GPR(R9)(r4)
  649. ld r10, VCPU_GPR(R10)(r4)
  650. ld r11, VCPU_GPR(R11)(r4)
  651. ld r12, VCPU_GPR(R12)(r4)
  652. ld r13, VCPU_GPR(R13)(r4)
  653. BEGIN_FTR_SECTION
  654. mtspr SPRN_PPR, r0
  655. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  656. ld r0, VCPU_GPR(R0)(r4)
  657. ld r4, VCPU_GPR(R4)(r4)
  658. hrfid
  659. b .
  660. /******************************************************************************
  661. * *
  662. * Exit code *
  663. * *
  664. *****************************************************************************/
  665. /*
  666. * We come here from the first-level interrupt handlers.
  667. */
  668. .globl kvmppc_interrupt_hv
  669. kvmppc_interrupt_hv:
  670. /*
  671. * Register contents:
  672. * R12 = interrupt vector
  673. * R13 = PACA
  674. * guest CR, R12 saved in shadow VCPU SCRATCH1/0
  675. * guest R13 saved in SPRN_SCRATCH0
  676. */
  677. /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
  678. std r9, HSTATE_HOST_R2(r13)
  679. lbz r9, HSTATE_IN_GUEST(r13)
  680. cmpwi r9, KVM_GUEST_MODE_HOST_HV
  681. beq kvmppc_bad_host_intr
  682. #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
  683. cmpwi r9, KVM_GUEST_MODE_GUEST
  684. ld r9, HSTATE_HOST_R2(r13)
  685. beq kvmppc_interrupt_pr
  686. #endif
  687. /* We're now back in the host but in guest MMU context */
  688. li r9, KVM_GUEST_MODE_HOST_HV
  689. stb r9, HSTATE_IN_GUEST(r13)
  690. ld r9, HSTATE_KVM_VCPU(r13)
  691. /* Save registers */
  692. std r0, VCPU_GPR(R0)(r9)
  693. std r1, VCPU_GPR(R1)(r9)
  694. std r2, VCPU_GPR(R2)(r9)
  695. std r3, VCPU_GPR(R3)(r9)
  696. std r4, VCPU_GPR(R4)(r9)
  697. std r5, VCPU_GPR(R5)(r9)
  698. std r6, VCPU_GPR(R6)(r9)
  699. std r7, VCPU_GPR(R7)(r9)
  700. std r8, VCPU_GPR(R8)(r9)
  701. ld r0, HSTATE_HOST_R2(r13)
  702. std r0, VCPU_GPR(R9)(r9)
  703. std r10, VCPU_GPR(R10)(r9)
  704. std r11, VCPU_GPR(R11)(r9)
  705. ld r3, HSTATE_SCRATCH0(r13)
  706. lwz r4, HSTATE_SCRATCH1(r13)
  707. std r3, VCPU_GPR(R12)(r9)
  708. stw r4, VCPU_CR(r9)
  709. BEGIN_FTR_SECTION
  710. ld r3, HSTATE_CFAR(r13)
  711. std r3, VCPU_CFAR(r9)
  712. END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
  713. BEGIN_FTR_SECTION
  714. ld r4, HSTATE_PPR(r13)
  715. std r4, VCPU_PPR(r9)
  716. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  717. /* Restore R1/R2 so we can handle faults */
  718. ld r1, HSTATE_HOST_R1(r13)
  719. ld r2, PACATOC(r13)
  720. mfspr r10, SPRN_SRR0
  721. mfspr r11, SPRN_SRR1
  722. std r10, VCPU_SRR0(r9)
  723. std r11, VCPU_SRR1(r9)
  724. andi. r0, r12, 2 /* need to read HSRR0/1? */
  725. beq 1f
  726. mfspr r10, SPRN_HSRR0
  727. mfspr r11, SPRN_HSRR1
  728. clrrdi r12, r12, 2
  729. 1: std r10, VCPU_PC(r9)
  730. std r11, VCPU_MSR(r9)
  731. GET_SCRATCH0(r3)
  732. mflr r4
  733. std r3, VCPU_GPR(R13)(r9)
  734. std r4, VCPU_LR(r9)
  735. stw r12,VCPU_TRAP(r9)
  736. /* Save HEIR (HV emulation assist reg) in last_inst
  737. if this is an HEI (HV emulation interrupt, e40) */
  738. li r3,KVM_INST_FETCH_FAILED
  739. BEGIN_FTR_SECTION
  740. cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
  741. bne 11f
  742. mfspr r3,SPRN_HEIR
  743. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  744. 11: stw r3,VCPU_LAST_INST(r9)
  745. /* these are volatile across C function calls */
  746. mfctr r3
  747. mfxer r4
  748. std r3, VCPU_CTR(r9)
  749. stw r4, VCPU_XER(r9)
  750. BEGIN_FTR_SECTION
  751. /* If this is a page table miss then see if it's theirs or ours */
  752. cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
  753. beq kvmppc_hdsi
  754. cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
  755. beq kvmppc_hisi
  756. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  757. /* See if this is a leftover HDEC interrupt */
  758. cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  759. bne 2f
  760. mfspr r3,SPRN_HDEC
  761. cmpwi r3,0
  762. bge ignore_hdec
  763. 2:
  764. /* See if this is an hcall we can handle in real mode */
  765. cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
  766. beq hcall_try_real_mode
  767. /* Only handle external interrupts here on arch 206 and later */
  768. BEGIN_FTR_SECTION
  769. b ext_interrupt_to_host
  770. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  771. /* External interrupt ? */
  772. cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  773. bne+ ext_interrupt_to_host
  774. /* External interrupt, first check for host_ipi. If this is
  775. * set, we know the host wants us out so let's do it now
  776. */
  777. do_ext_interrupt:
  778. bl kvmppc_read_intr
  779. cmpdi r3, 0
  780. bgt ext_interrupt_to_host
  781. /* All right, looks like an IPI for the guest, we need to set MER */
  782. /* Check if any CPU is heading out to the host, if so head out too */
  783. ld r5, HSTATE_KVM_VCORE(r13)
  784. lwz r0, VCORE_ENTRY_EXIT(r5)
  785. cmpwi r0, 0x100
  786. bge ext_interrupt_to_host
  787. /* See if there is a pending interrupt for the guest */
  788. mfspr r8, SPRN_LPCR
  789. ld r0, VCPU_PENDING_EXC(r9)
  790. /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
  791. rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
  792. rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
  793. beq 2f
  794. /* And if the guest EE is set, we can deliver immediately, else
  795. * we return to the guest with MER set
  796. */
  797. andi. r0, r11, MSR_EE
  798. beq 2f
  799. mtspr SPRN_SRR0, r10
  800. mtspr SPRN_SRR1, r11
  801. li r10, BOOK3S_INTERRUPT_EXTERNAL
  802. li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
  803. rotldi r11, r11, 63
  804. 2: mr r4, r9
  805. mtspr SPRN_LPCR, r8
  806. b fast_guest_return
  807. ext_interrupt_to_host:
  808. guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
  809. /* Save more register state */
  810. mfdar r6
  811. mfdsisr r7
  812. std r6, VCPU_DAR(r9)
  813. stw r7, VCPU_DSISR(r9)
  814. BEGIN_FTR_SECTION
  815. /* don't overwrite fault_dar/fault_dsisr if HDSI */
  816. cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
  817. beq 6f
  818. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  819. std r6, VCPU_FAULT_DAR(r9)
  820. stw r7, VCPU_FAULT_DSISR(r9)
  821. /* See if it is a machine check */
  822. cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  823. beq machine_check_realmode
  824. mc_cont:
  825. /* Save guest CTRL register, set runlatch to 1 */
  826. 6: mfspr r6,SPRN_CTRLF
  827. stw r6,VCPU_CTRL(r9)
  828. andi. r0,r6,1
  829. bne 4f
  830. ori r6,r6,1
  831. mtspr SPRN_CTRLT,r6
  832. 4:
  833. /* Read the guest SLB and save it away */
  834. lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
  835. mtctr r0
  836. li r6,0
  837. addi r7,r9,VCPU_SLB
  838. li r5,0
  839. 1: slbmfee r8,r6
  840. andis. r0,r8,SLB_ESID_V@h
  841. beq 2f
  842. add r8,r8,r6 /* put index in */
  843. slbmfev r3,r6
  844. std r8,VCPU_SLB_E(r7)
  845. std r3,VCPU_SLB_V(r7)
  846. addi r7,r7,VCPU_SLB_SIZE
  847. addi r5,r5,1
  848. 2: addi r6,r6,1
  849. bdnz 1b
  850. stw r5,VCPU_SLB_MAX(r9)
  851. /*
  852. * Save the guest PURR/SPURR
  853. */
  854. BEGIN_FTR_SECTION
  855. mfspr r5,SPRN_PURR
  856. mfspr r6,SPRN_SPURR
  857. ld r7,VCPU_PURR(r9)
  858. ld r8,VCPU_SPURR(r9)
  859. std r5,VCPU_PURR(r9)
  860. std r6,VCPU_SPURR(r9)
  861. subf r5,r7,r5
  862. subf r6,r8,r6
  863. /*
  864. * Restore host PURR/SPURR and add guest times
  865. * so that the time in the guest gets accounted.
  866. */
  867. ld r3,HSTATE_PURR(r13)
  868. ld r4,HSTATE_SPURR(r13)
  869. add r3,r3,r5
  870. add r4,r4,r6
  871. mtspr SPRN_PURR,r3
  872. mtspr SPRN_SPURR,r4
  873. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
  874. /* Clear out SLB */
  875. li r5,0
  876. slbmte r5,r5
  877. slbia
  878. ptesync
  879. hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */
  880. BEGIN_FTR_SECTION
  881. b 32f
  882. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  883. /*
  884. * POWER7 guest -> host partition switch code.
  885. * We don't have to lock against tlbies but we do
  886. * have to coordinate the hardware threads.
  887. */
  888. /* Increment the threads-exiting-guest count in the 0xff00
  889. bits of vcore->entry_exit_count */
  890. lwsync
  891. ld r5,HSTATE_KVM_VCORE(r13)
  892. addi r6,r5,VCORE_ENTRY_EXIT
  893. 41: lwarx r3,0,r6
  894. addi r0,r3,0x100
  895. stwcx. r0,0,r6
  896. bne 41b
  897. lwsync
  898. /*
  899. * At this point we have an interrupt that we have to pass
  900. * up to the kernel or qemu; we can't handle it in real mode.
  901. * Thus we have to do a partition switch, so we have to
  902. * collect the other threads, if we are the first thread
  903. * to take an interrupt. To do this, we set the HDEC to 0,
  904. * which causes an HDEC interrupt in all threads within 2ns
  905. * because the HDEC register is shared between all 4 threads.
  906. * However, we don't need to bother if this is an HDEC
  907. * interrupt, since the other threads will already be on their
  908. * way here in that case.
  909. */
  910. cmpwi r3,0x100 /* Are we the first here? */
  911. bge 43f
  912. cmpwi r3,1 /* Are any other threads in the guest? */
  913. ble 43f
  914. cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
  915. beq 40f
  916. li r0,0
  917. mtspr SPRN_HDEC,r0
  918. 40:
  919. /*
  920. * Send an IPI to any napping threads, since an HDEC interrupt
  921. * doesn't wake CPUs up from nap.
  922. */
  923. lwz r3,VCORE_NAPPING_THREADS(r5)
  924. lwz r4,VCPU_PTID(r9)
  925. li r0,1
  926. sld r0,r0,r4
  927. andc. r3,r3,r0 /* no sense IPI'ing ourselves */
  928. beq 43f
  929. mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
  930. subf r6,r4,r13
  931. 42: andi. r0,r3,1
  932. beq 44f
  933. ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
  934. li r0,IPI_PRIORITY
  935. li r7,XICS_MFRR
  936. stbcix r0,r7,r8 /* trigger the IPI */
  937. 44: srdi. r3,r3,1
  938. addi r6,r6,PACA_SIZE
  939. bne 42b
  940. /* Secondary threads wait for primary to do partition switch */
  941. 43: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
  942. ld r5,HSTATE_KVM_VCORE(r13)
  943. lwz r3,VCPU_PTID(r9)
  944. cmpwi r3,0
  945. beq 15f
  946. HMT_LOW
  947. 13: lbz r3,VCORE_IN_GUEST(r5)
  948. cmpwi r3,0
  949. bne 13b
  950. HMT_MEDIUM
  951. b 16f
  952. /* Primary thread waits for all the secondaries to exit guest */
  953. 15: lwz r3,VCORE_ENTRY_EXIT(r5)
  954. srwi r0,r3,8
  955. clrldi r3,r3,56
  956. cmpw r3,r0
  957. bne 15b
  958. isync
  959. /* Primary thread switches back to host partition */
  960. ld r6,KVM_HOST_SDR1(r4)
  961. lwz r7,KVM_HOST_LPID(r4)
  962. li r8,LPID_RSVD /* switch to reserved LPID */
  963. mtspr SPRN_LPID,r8
  964. ptesync
  965. mtspr SPRN_SDR1,r6 /* switch to partition page table */
  966. mtspr SPRN_LPID,r7
  967. isync
  968. /* Subtract timebase offset from timebase */
  969. ld r8,VCORE_TB_OFFSET(r5)
  970. cmpdi r8,0
  971. beq 17f
  972. mftb r6 /* current host timebase */
  973. subf r8,r8,r6
  974. mtspr SPRN_TBU40,r8 /* update upper 40 bits */
  975. mftb r7 /* check if lower 24 bits overflowed */
  976. clrldi r6,r6,40
  977. clrldi r7,r7,40
  978. cmpld r7,r6
  979. bge 17f
  980. addis r8,r8,0x100 /* if so, increment upper 40 bits */
  981. mtspr SPRN_TBU40,r8
  982. /* Reset PCR */
  983. 17: ld r0, VCORE_PCR(r5)
  984. cmpdi r0, 0
  985. beq 18f
  986. li r0, 0
  987. mtspr SPRN_PCR, r0
  988. 18:
  989. /* Signal secondary CPUs to continue */
  990. stb r0,VCORE_IN_GUEST(r5)
  991. lis r8,0x7fff /* MAX_INT@h */
  992. mtspr SPRN_HDEC,r8
  993. 16: ld r8,KVM_HOST_LPCR(r4)
  994. mtspr SPRN_LPCR,r8
  995. isync
  996. b 33f
  997. /*
  998. * PPC970 guest -> host partition switch code.
  999. * We have to lock against concurrent tlbies, and
  1000. * we have to flush the whole TLB.
  1001. */
  1002. 32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
  1003. /* Take the guest's tlbie_lock */
  1004. #ifdef __BIG_ENDIAN__
  1005. lwz r8,PACA_LOCK_TOKEN(r13)
  1006. #else
  1007. lwz r8,PACAPACAINDEX(r13)
  1008. #endif
  1009. addi r3,r4,KVM_TLBIE_LOCK
  1010. 24: lwarx r0,0,r3
  1011. cmpwi r0,0
  1012. bne 24b
  1013. stwcx. r8,0,r3
  1014. bne 24b
  1015. isync
  1016. ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
  1017. li r0,0x18f
  1018. rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
  1019. or r0,r7,r0
  1020. ptesync
  1021. sync
  1022. mtspr SPRN_HID4,r0 /* switch to reserved LPID */
  1023. isync
  1024. li r0,0
  1025. stw r0,0(r3) /* drop guest tlbie_lock */
  1026. /* invalidate the whole TLB */
  1027. li r0,256
  1028. mtctr r0
  1029. li r6,0
  1030. 25: tlbiel r6
  1031. addi r6,r6,0x1000
  1032. bdnz 25b
  1033. ptesync
  1034. /* take native_tlbie_lock */
  1035. ld r3,toc_tlbie_lock@toc(2)
  1036. 24: lwarx r0,0,r3
  1037. cmpwi r0,0
  1038. bne 24b
  1039. stwcx. r8,0,r3
  1040. bne 24b
  1041. isync
  1042. ld r6,KVM_HOST_SDR1(r4)
  1043. mtspr SPRN_SDR1,r6 /* switch to host page table */
  1044. /* Set up host HID4 value */
  1045. sync
  1046. mtspr SPRN_HID4,r7
  1047. isync
  1048. li r0,0
  1049. stw r0,0(r3) /* drop native_tlbie_lock */
  1050. lis r8,0x7fff /* MAX_INT@h */
  1051. mtspr SPRN_HDEC,r8
  1052. /* Disable HDEC interrupts */
  1053. mfspr r0,SPRN_HID0
  1054. li r3,0
  1055. rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
  1056. sync
  1057. mtspr SPRN_HID0,r0
  1058. mfspr r0,SPRN_HID0
  1059. mfspr r0,SPRN_HID0
  1060. mfspr r0,SPRN_HID0
  1061. mfspr r0,SPRN_HID0
  1062. mfspr r0,SPRN_HID0
  1063. mfspr r0,SPRN_HID0
  1064. /* load host SLB entries */
  1065. 33: ld r8,PACA_SLBSHADOWPTR(r13)
  1066. .rept SLB_NUM_BOLTED
  1067. ld r5,SLBSHADOW_SAVEAREA(r8)
  1068. ld r6,SLBSHADOW_SAVEAREA+8(r8)
  1069. andis. r7,r5,SLB_ESID_V@h
  1070. beq 1f
  1071. slbmte r6,r5
  1072. 1: addi r8,r8,16
  1073. .endr
  1074. /* Save DEC */
  1075. mfspr r5,SPRN_DEC
  1076. mftb r6
  1077. extsw r5,r5
  1078. add r5,r5,r6
  1079. std r5,VCPU_DEC_EXPIRES(r9)
  1080. /* Save and reset AMR and UAMOR before turning on the MMU */
  1081. BEGIN_FTR_SECTION
  1082. mfspr r5,SPRN_AMR
  1083. mfspr r6,SPRN_UAMOR
  1084. std r5,VCPU_AMR(r9)
  1085. std r6,VCPU_UAMOR(r9)
  1086. li r6,0
  1087. mtspr SPRN_AMR,r6
  1088. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  1089. /* Unset guest mode */
  1090. li r0, KVM_GUEST_MODE_NONE
  1091. stb r0, HSTATE_IN_GUEST(r13)
  1092. /* Switch DSCR back to host value */
  1093. BEGIN_FTR_SECTION
  1094. mfspr r8, SPRN_DSCR
  1095. ld r7, HSTATE_DSCR(r13)
  1096. std r8, VCPU_DSCR(r9)
  1097. mtspr SPRN_DSCR, r7
  1098. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  1099. /* Save non-volatile GPRs */
  1100. std r14, VCPU_GPR(R14)(r9)
  1101. std r15, VCPU_GPR(R15)(r9)
  1102. std r16, VCPU_GPR(R16)(r9)
  1103. std r17, VCPU_GPR(R17)(r9)
  1104. std r18, VCPU_GPR(R18)(r9)
  1105. std r19, VCPU_GPR(R19)(r9)
  1106. std r20, VCPU_GPR(R20)(r9)
  1107. std r21, VCPU_GPR(R21)(r9)
  1108. std r22, VCPU_GPR(R22)(r9)
  1109. std r23, VCPU_GPR(R23)(r9)
  1110. std r24, VCPU_GPR(R24)(r9)
  1111. std r25, VCPU_GPR(R25)(r9)
  1112. std r26, VCPU_GPR(R26)(r9)
  1113. std r27, VCPU_GPR(R27)(r9)
  1114. std r28, VCPU_GPR(R28)(r9)
  1115. std r29, VCPU_GPR(R29)(r9)
  1116. std r30, VCPU_GPR(R30)(r9)
  1117. std r31, VCPU_GPR(R31)(r9)
  1118. /* Save SPRGs */
  1119. mfspr r3, SPRN_SPRG0
  1120. mfspr r4, SPRN_SPRG1
  1121. mfspr r5, SPRN_SPRG2
  1122. mfspr r6, SPRN_SPRG3
  1123. std r3, VCPU_SPRG0(r9)
  1124. std r4, VCPU_SPRG1(r9)
  1125. std r5, VCPU_SPRG2(r9)
  1126. std r6, VCPU_SPRG3(r9)
  1127. /* save FP state */
  1128. mr r3, r9
  1129. bl .kvmppc_save_fp
  1130. /* Increment yield count if they have a VPA */
  1131. ld r8, VCPU_VPA(r9) /* do they have a VPA? */
  1132. cmpdi r8, 0
  1133. beq 25f
  1134. lwz r3, LPPACA_YIELDCOUNT(r8)
  1135. addi r3, r3, 1
  1136. stw r3, LPPACA_YIELDCOUNT(r8)
  1137. li r3, 1
  1138. stb r3, VCPU_VPA_DIRTY(r9)
  1139. 25:
  1140. /* Save PMU registers if requested */
  1141. /* r8 and cr0.eq are live here */
  1142. li r3, 1
  1143. sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
  1144. mfspr r4, SPRN_MMCR0 /* save MMCR0 */
  1145. mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
  1146. mfspr r6, SPRN_MMCRA
  1147. BEGIN_FTR_SECTION
  1148. /* On P7, clear MMCRA in order to disable SDAR updates */
  1149. li r7, 0
  1150. mtspr SPRN_MMCRA, r7
  1151. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  1152. isync
  1153. beq 21f /* if no VPA, save PMU stuff anyway */
  1154. lbz r7, LPPACA_PMCINUSE(r8)
  1155. cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
  1156. bne 21f
  1157. std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
  1158. b 22f
  1159. 21: mfspr r5, SPRN_MMCR1
  1160. mfspr r7, SPRN_SIAR
  1161. mfspr r8, SPRN_SDAR
  1162. std r4, VCPU_MMCR(r9)
  1163. std r5, VCPU_MMCR + 8(r9)
  1164. std r6, VCPU_MMCR + 16(r9)
  1165. std r7, VCPU_SIAR(r9)
  1166. std r8, VCPU_SDAR(r9)
  1167. mfspr r3, SPRN_PMC1
  1168. mfspr r4, SPRN_PMC2
  1169. mfspr r5, SPRN_PMC3
  1170. mfspr r6, SPRN_PMC4
  1171. mfspr r7, SPRN_PMC5
  1172. mfspr r8, SPRN_PMC6
  1173. BEGIN_FTR_SECTION
  1174. mfspr r10, SPRN_PMC7
  1175. mfspr r11, SPRN_PMC8
  1176. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  1177. stw r3, VCPU_PMC(r9)
  1178. stw r4, VCPU_PMC + 4(r9)
  1179. stw r5, VCPU_PMC + 8(r9)
  1180. stw r6, VCPU_PMC + 12(r9)
  1181. stw r7, VCPU_PMC + 16(r9)
  1182. stw r8, VCPU_PMC + 20(r9)
  1183. BEGIN_FTR_SECTION
  1184. stw r10, VCPU_PMC + 24(r9)
  1185. stw r11, VCPU_PMC + 28(r9)
  1186. END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  1187. 22:
  1188. ld r0, 112+PPC_LR_STKOFF(r1)
  1189. addi r1, r1, 112
  1190. mtlr r0
  1191. blr
  1192. secondary_too_late:
  1193. ld r5,HSTATE_KVM_VCORE(r13)
  1194. HMT_LOW
  1195. 13: lbz r3,VCORE_IN_GUEST(r5)
  1196. cmpwi r3,0
  1197. bne 13b
  1198. HMT_MEDIUM
  1199. li r0, KVM_GUEST_MODE_NONE
  1200. stb r0, HSTATE_IN_GUEST(r13)
  1201. ld r11,PACA_SLBSHADOWPTR(r13)
  1202. .rept SLB_NUM_BOLTED
  1203. ld r5,SLBSHADOW_SAVEAREA(r11)
  1204. ld r6,SLBSHADOW_SAVEAREA+8(r11)
  1205. andis. r7,r5,SLB_ESID_V@h
  1206. beq 1f
  1207. slbmte r6,r5
  1208. 1: addi r11,r11,16
  1209. .endr
  1210. b 22b
  1211. /*
  1212. * Check whether an HDSI is an HPTE not found fault or something else.
  1213. * If it is an HPTE not found fault that is due to the guest accessing
  1214. * a page that they have mapped but which we have paged out, then
  1215. * we continue on with the guest exit path. In all other cases,
  1216. * reflect the HDSI to the guest as a DSI.
  1217. */
  1218. kvmppc_hdsi:
  1219. mfspr r4, SPRN_HDAR
  1220. mfspr r6, SPRN_HDSISR
  1221. /* HPTE not found fault or protection fault? */
  1222. andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
  1223. beq 1f /* if not, send it to the guest */
  1224. andi. r0, r11, MSR_DR /* data relocation enabled? */
  1225. beq 3f
  1226. clrrdi r0, r4, 28
  1227. PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  1228. bne 1f /* if no SLB entry found */
  1229. 4: std r4, VCPU_FAULT_DAR(r9)
  1230. stw r6, VCPU_FAULT_DSISR(r9)
  1231. /* Search the hash table. */
  1232. mr r3, r9 /* vcpu pointer */
  1233. li r7, 1 /* data fault */
  1234. bl .kvmppc_hpte_hv_fault
  1235. ld r9, HSTATE_KVM_VCPU(r13)
  1236. ld r10, VCPU_PC(r9)
  1237. ld r11, VCPU_MSR(r9)
  1238. li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
  1239. cmpdi r3, 0 /* retry the instruction */
  1240. beq 6f
  1241. cmpdi r3, -1 /* handle in kernel mode */
  1242. beq guest_exit_cont
  1243. cmpdi r3, -2 /* MMIO emulation; need instr word */
  1244. beq 2f
  1245. /* Synthesize a DSI for the guest */
  1246. ld r4, VCPU_FAULT_DAR(r9)
  1247. mr r6, r3
  1248. 1: mtspr SPRN_DAR, r4
  1249. mtspr SPRN_DSISR, r6
  1250. mtspr SPRN_SRR0, r10
  1251. mtspr SPRN_SRR1, r11
  1252. li r10, BOOK3S_INTERRUPT_DATA_STORAGE
  1253. li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
  1254. rotldi r11, r11, 63
  1255. fast_interrupt_c_return:
  1256. 6: ld r7, VCPU_CTR(r9)
  1257. lwz r8, VCPU_XER(r9)
  1258. mtctr r7
  1259. mtxer r8
  1260. mr r4, r9
  1261. b fast_guest_return
  1262. 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
  1263. ld r5, KVM_VRMA_SLB_V(r5)
  1264. b 4b
  1265. /* If this is for emulated MMIO, load the instruction word */
  1266. 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
  1267. /* Set guest mode to 'jump over instruction' so if lwz faults
  1268. * we'll just continue at the next IP. */
  1269. li r0, KVM_GUEST_MODE_SKIP
  1270. stb r0, HSTATE_IN_GUEST(r13)
  1271. /* Do the access with MSR:DR enabled */
  1272. mfmsr r3
  1273. ori r4, r3, MSR_DR /* Enable paging for data */
  1274. mtmsrd r4
  1275. lwz r8, 0(r10)
  1276. mtmsrd r3
  1277. /* Store the result */
  1278. stw r8, VCPU_LAST_INST(r9)
  1279. /* Unset guest mode. */
  1280. li r0, KVM_GUEST_MODE_HOST_HV
  1281. stb r0, HSTATE_IN_GUEST(r13)
  1282. b guest_exit_cont
  1283. /*
  1284. * Similarly for an HISI, reflect it to the guest as an ISI unless
  1285. * it is an HPTE not found fault for a page that we have paged out.
  1286. */
  1287. kvmppc_hisi:
  1288. andis. r0, r11, SRR1_ISI_NOPT@h
  1289. beq 1f
  1290. andi. r0, r11, MSR_IR /* instruction relocation enabled? */
  1291. beq 3f
  1292. clrrdi r0, r10, 28
  1293. PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  1294. bne 1f /* if no SLB entry found */
  1295. 4:
  1296. /* Search the hash table. */
  1297. mr r3, r9 /* vcpu pointer */
  1298. mr r4, r10
  1299. mr r6, r11
  1300. li r7, 0 /* instruction fault */
  1301. bl .kvmppc_hpte_hv_fault
  1302. ld r9, HSTATE_KVM_VCPU(r13)
  1303. ld r10, VCPU_PC(r9)
  1304. ld r11, VCPU_MSR(r9)
  1305. li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
  1306. cmpdi r3, 0 /* retry the instruction */
  1307. beq fast_interrupt_c_return
  1308. cmpdi r3, -1 /* handle in kernel mode */
  1309. beq guest_exit_cont
  1310. /* Synthesize an ISI for the guest */
  1311. mr r11, r3
  1312. 1: mtspr SPRN_SRR0, r10
  1313. mtspr SPRN_SRR1, r11
  1314. li r10, BOOK3S_INTERRUPT_INST_STORAGE
  1315. li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
  1316. rotldi r11, r11, 63
  1317. b fast_interrupt_c_return
  1318. 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
  1319. ld r5, KVM_VRMA_SLB_V(r6)
  1320. b 4b
  1321. /*
  1322. * Try to handle an hcall in real mode.
  1323. * Returns to the guest if we handle it, or continues on up to
  1324. * the kernel if we can't (i.e. if we don't have a handler for
  1325. * it, or if the handler returns H_TOO_HARD).
  1326. */
  1327. .globl hcall_try_real_mode
  1328. hcall_try_real_mode:
  1329. ld r3,VCPU_GPR(R3)(r9)
  1330. andi. r0,r11,MSR_PR
  1331. bne guest_exit_cont
  1332. clrrdi r3,r3,2
  1333. cmpldi r3,hcall_real_table_end - hcall_real_table
  1334. bge guest_exit_cont
  1335. LOAD_REG_ADDR(r4, hcall_real_table)
  1336. lwax r3,r3,r4
  1337. cmpwi r3,0
  1338. beq guest_exit_cont
  1339. add r3,r3,r4
  1340. mtctr r3
  1341. mr r3,r9 /* get vcpu pointer */
  1342. ld r4,VCPU_GPR(R4)(r9)
  1343. bctrl
  1344. cmpdi r3,H_TOO_HARD
  1345. beq hcall_real_fallback
  1346. ld r4,HSTATE_KVM_VCPU(r13)
  1347. std r3,VCPU_GPR(R3)(r4)
  1348. ld r10,VCPU_PC(r4)
  1349. ld r11,VCPU_MSR(r4)
  1350. b fast_guest_return
  1351. /* We've attempted a real mode hcall, but it has been punted back
  1352. * to userspace. We need to restore some clobbered volatiles
  1353. * before resuming the pass-it-to-qemu path */
  1354. hcall_real_fallback:
  1355. li r12,BOOK3S_INTERRUPT_SYSCALL
  1356. ld r9, HSTATE_KVM_VCPU(r13)
  1357. b guest_exit_cont
  1358. .globl hcall_real_table
  1359. hcall_real_table:
  1360. .long 0 /* 0 - unused */
  1361. .long .kvmppc_h_remove - hcall_real_table
  1362. .long .kvmppc_h_enter - hcall_real_table
  1363. .long .kvmppc_h_read - hcall_real_table
  1364. .long 0 /* 0x10 - H_CLEAR_MOD */
  1365. .long 0 /* 0x14 - H_CLEAR_REF */
  1366. .long .kvmppc_h_protect - hcall_real_table
  1367. .long 0 /* 0x1c - H_GET_TCE */
  1368. .long .kvmppc_h_put_tce - hcall_real_table
  1369. .long 0 /* 0x24 - H_SET_SPRG0 */
  1370. .long .kvmppc_h_set_dabr - hcall_real_table
  1371. .long 0 /* 0x2c */
  1372. .long 0 /* 0x30 */
  1373. .long 0 /* 0x34 */
  1374. .long 0 /* 0x38 */
  1375. .long 0 /* 0x3c */
  1376. .long 0 /* 0x40 */
  1377. .long 0 /* 0x44 */
  1378. .long 0 /* 0x48 */
  1379. .long 0 /* 0x4c */
  1380. .long 0 /* 0x50 */
  1381. .long 0 /* 0x54 */
  1382. .long 0 /* 0x58 */
  1383. .long 0 /* 0x5c */
  1384. .long 0 /* 0x60 */
  1385. #ifdef CONFIG_KVM_XICS
  1386. .long .kvmppc_rm_h_eoi - hcall_real_table
  1387. .long .kvmppc_rm_h_cppr - hcall_real_table
  1388. .long .kvmppc_rm_h_ipi - hcall_real_table
  1389. .long 0 /* 0x70 - H_IPOLL */
  1390. .long .kvmppc_rm_h_xirr - hcall_real_table
  1391. #else
  1392. .long 0 /* 0x64 - H_EOI */
  1393. .long 0 /* 0x68 - H_CPPR */
  1394. .long 0 /* 0x6c - H_IPI */
  1395. .long 0 /* 0x70 - H_IPOLL */
  1396. .long 0 /* 0x74 - H_XIRR */
  1397. #endif
  1398. .long 0 /* 0x78 */
  1399. .long 0 /* 0x7c */
  1400. .long 0 /* 0x80 */
  1401. .long 0 /* 0x84 */
  1402. .long 0 /* 0x88 */
  1403. .long 0 /* 0x8c */
  1404. .long 0 /* 0x90 */
  1405. .long 0 /* 0x94 */
  1406. .long 0 /* 0x98 */
  1407. .long 0 /* 0x9c */
  1408. .long 0 /* 0xa0 */
  1409. .long 0 /* 0xa4 */
  1410. .long 0 /* 0xa8 */
  1411. .long 0 /* 0xac */
  1412. .long 0 /* 0xb0 */
  1413. .long 0 /* 0xb4 */
  1414. .long 0 /* 0xb8 */
  1415. .long 0 /* 0xbc */
  1416. .long 0 /* 0xc0 */
  1417. .long 0 /* 0xc4 */
  1418. .long 0 /* 0xc8 */
  1419. .long 0 /* 0xcc */
  1420. .long 0 /* 0xd0 */
  1421. .long 0 /* 0xd4 */
  1422. .long 0 /* 0xd8 */
  1423. .long 0 /* 0xdc */
  1424. .long .kvmppc_h_cede - hcall_real_table
  1425. .long 0 /* 0xe4 */
  1426. .long 0 /* 0xe8 */
  1427. .long 0 /* 0xec */
  1428. .long 0 /* 0xf0 */
  1429. .long 0 /* 0xf4 */
  1430. .long 0 /* 0xf8 */
  1431. .long 0 /* 0xfc */
  1432. .long 0 /* 0x100 */
  1433. .long 0 /* 0x104 */
  1434. .long 0 /* 0x108 */
  1435. .long 0 /* 0x10c */
  1436. .long 0 /* 0x110 */
  1437. .long 0 /* 0x114 */
  1438. .long 0 /* 0x118 */
  1439. .long 0 /* 0x11c */
  1440. .long 0 /* 0x120 */
  1441. .long .kvmppc_h_bulk_remove - hcall_real_table
  1442. hcall_real_table_end:
  1443. ignore_hdec:
  1444. mr r4,r9
  1445. b fast_guest_return
  1446. _GLOBAL(kvmppc_h_set_dabr)
  1447. std r4,VCPU_DABR(r3)
  1448. /* Work around P7 bug where DABR can get corrupted on mtspr */
  1449. 1: mtspr SPRN_DABR,r4
  1450. mfspr r5, SPRN_DABR
  1451. cmpd r4, r5
  1452. bne 1b
  1453. isync
  1454. li r3,0
  1455. blr
  1456. _GLOBAL(kvmppc_h_cede)
  1457. ori r11,r11,MSR_EE
  1458. std r11,VCPU_MSR(r3)
  1459. li r0,1
  1460. stb r0,VCPU_CEDED(r3)
  1461. sync /* order setting ceded vs. testing prodded */
  1462. lbz r5,VCPU_PRODDED(r3)
  1463. cmpwi r5,0
  1464. bne kvm_cede_prodded
  1465. li r0,0 /* set trap to 0 to say hcall is handled */
  1466. stw r0,VCPU_TRAP(r3)
  1467. li r0,H_SUCCESS
  1468. std r0,VCPU_GPR(R3)(r3)
  1469. BEGIN_FTR_SECTION
  1470. b kvm_cede_exit /* just send it up to host on 970 */
  1471. END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  1472. /*
  1473. * Set our bit in the bitmask of napping threads unless all the
  1474. * other threads are already napping, in which case we send this
  1475. * up to the host.
  1476. */
  1477. ld r5,HSTATE_KVM_VCORE(r13)
  1478. lwz r6,VCPU_PTID(r3)
  1479. lwz r8,VCORE_ENTRY_EXIT(r5)
  1480. clrldi r8,r8,56
  1481. li r0,1
  1482. sld r0,r0,r6
  1483. addi r6,r5,VCORE_NAPPING_THREADS
  1484. 31: lwarx r4,0,r6
  1485. or r4,r4,r0
  1486. PPC_POPCNTW(R7,R4)
  1487. cmpw r7,r8
  1488. bge kvm_cede_exit
  1489. stwcx. r4,0,r6
  1490. bne 31b
  1491. li r0,1
  1492. stb r0,HSTATE_NAPPING(r13)
  1493. /* order napping_threads update vs testing entry_exit_count */
  1494. lwsync
  1495. mr r4,r3
  1496. lwz r7,VCORE_ENTRY_EXIT(r5)
  1497. cmpwi r7,0x100
  1498. bge 33f /* another thread already exiting */
  1499. /*
  1500. * Although not specifically required by the architecture, POWER7
  1501. * preserves the following registers in nap mode, even if an SMT mode
  1502. * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
  1503. * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
  1504. */
  1505. /* Save non-volatile GPRs */
  1506. std r14, VCPU_GPR(R14)(r3)
  1507. std r15, VCPU_GPR(R15)(r3)
  1508. std r16, VCPU_GPR(R16)(r3)
  1509. std r17, VCPU_GPR(R17)(r3)
  1510. std r18, VCPU_GPR(R18)(r3)
  1511. std r19, VCPU_GPR(R19)(r3)
  1512. std r20, VCPU_GPR(R20)(r3)
  1513. std r21, VCPU_GPR(R21)(r3)
  1514. std r22, VCPU_GPR(R22)(r3)
  1515. std r23, VCPU_GPR(R23)(r3)
  1516. std r24, VCPU_GPR(R24)(r3)
  1517. std r25, VCPU_GPR(R25)(r3)
  1518. std r26, VCPU_GPR(R26)(r3)
  1519. std r27, VCPU_GPR(R27)(r3)
  1520. std r28, VCPU_GPR(R28)(r3)
  1521. std r29, VCPU_GPR(R29)(r3)
  1522. std r30, VCPU_GPR(R30)(r3)
  1523. std r31, VCPU_GPR(R31)(r3)
  1524. /* save FP state */
  1525. bl .kvmppc_save_fp
  1526. /*
  1527. * Take a nap until a decrementer or external interrupt occurs,
  1528. * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
  1529. */
  1530. li r0,1
  1531. stb r0,HSTATE_HWTHREAD_REQ(r13)
  1532. mfspr r5,SPRN_LPCR
  1533. ori r5,r5,LPCR_PECE0 | LPCR_PECE1
  1534. mtspr SPRN_LPCR,r5
  1535. isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3, SPRN_SRR1
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	mr	r9, r4
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback
	/* cede when we have already been prodded */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
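	/* the rotate moves the low bit up to MSR_SF (bit 63), leaving MSR_SF | MSR_ME in r11 */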
	b	fast_interrupt_c_return

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
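	/* low 24 bits of the XIRR are the interrupt source (XISR); non-zero means something is pending */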
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	li	r3, 1
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host; stash it in the PACA
	 * before exit, and it will be picked up by the host ICP driver.
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	b	1b

43:	/* We raced with the host; we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
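	/*
	 * With VSX, the 32 VSRs (16 bytes each) subsume the FPRs; otherwise
	 * save the 32 FPRs (8 bytes each).
	 */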
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
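	/* Restore FPSCR, then the VSX/FP register file, then the Altivec state and VRSAVE. */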
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.