book3s_hv_rmhandlers.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping          *
 *                                                                           *
 ****************************************************************************/
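
/*
 * These stubs skip the instruction that caused the interrupt: they
 * advance the (H)SRR0 return address past it, recover r13 from the
 * scratch SPR and return to the interrupted code.
 */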
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200). The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f			/* if not */
	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r7,XICS_XIRR		/* if it was an external interrupt, */
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	28f			/* none there? */
	cmpwi	r9,XICS_IPI		/* was it an IPI? */
	bne	29f
	li	r0,0xff
	li	r6,XICS_MFRR
	stbcix	r0,r5,r6		/* clear IPI */
	stwcix	r8,r5,r7		/* EOI the interrupt */
	sync				/* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest
	b	30f

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b	kvm_no_guest
28:	/* SRR1 said external but ICP said nope?? */
	b	kvm_no_guest
29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw	r8,HSTATE_SAVED_XIRR(r13)
	b	kvm_no_guest

30:	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - we're an offline thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
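	/*
	 * Dummy store/ptesync/load/compare sequence: make sure the
	 * store to HSTATE_SCRATCH0 (and everything before it) has
	 * completed before we nap. The compare of a register with
	 * itself never takes the branch; it only forces the load
	 * to finish.
	 */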
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6			/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

37:	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
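	/* rotate MSR_HV up to the top bit, clear it there, rotate back */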
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
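	/*
	 * A guest external interrupt is pending: on POWER7, set LPCR_MER
	 * so a mediated external interrupt is presented once the guest
	 * enables MSR_EE; if guest EE is already on, deliver it now.
	 */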
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */
BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
do_ext_interrupt:
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* All right, looks like an IPI for the guest, we need to set MER */
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	mfspr	r8, SPRN_LPCR
	ld	r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	beq	2f

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
	beq	2f
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_EXTERNAL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
2:	mr	r4, r9
	mtspr	SPRN_LPCR, r8
	b	fast_guest_return

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current host timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Signal secondary CPUs to continue */
17:	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr
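
/*
 * A secondary thread that arrived too late to enter the guest waits
 * here for the primary to switch back to the host partition, then
 * restores the bolted host SLB entries and rejoins the exit path.
 */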
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	22b

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
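	/*
	 * hcall numbers are multiples of 4, so the hcall number is also
	 * the byte offset of its 32-bit entry in hcall_real_table.
	 */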
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3, SPRN_SRR1
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	mr	r9, r4
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	li	r3, 1
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
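	/* unrolled assembler loop: store all 32 VSRs into the vcpu struct */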
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr