book3s_hv_rmhandlers.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/
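
/*
 * Step over the instruction that trapped: advance (H)SRR0 past it,
 * recover r13 from the scratch SPR, and return straight to the guest.
 */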
	.globl kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr r13,SPRN_SRR0
	addi r13,r13,4
	mtspr SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b .

	.globl kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr r13,SPRN_HSRR0
	addi r13,r13,4
	mtspr SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b .

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
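/*
 * Clearing RI first marks the window where SRR0/SRR1 hold the new
 * values as non-recoverable; the RFI then switches to an MSR with
 * IR/DR clear, landing in kvmppc_hv_entry in real mode.
 */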
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li r0,MSR_RI
	andc r0,r10,r0
	li r6,MSR_IR | MSR_DR
	andc r6,r10,r6
	mtmsrd r0,1 /* clear RI in MSR */
	mtsrr0 r5
	mtsrr1 r6
	RFI

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl kvm_start_guest
kvm_start_guest:
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_OVERHEAD
	ld r2,PACATOC(r13)
	li r0,KVM_HWTHREAD_IN_KVM
	stb r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li r0,1
	stb r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz r0,HSTATE_NAPPING(r13)
	cmpwi r0,0
	bne kvm_end_cede

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr r3,SPRN_SRR1
	rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
	cmpwi r3,4 /* was it an external interrupt? */
	bne 27f /* if not */
	ld r5,HSTATE_XICS_PHYS(r13)
	li r7,XICS_XIRR /* if it was an external interrupt, */
	lwzcix r8,r5,r7 /* get and ack the interrupt */
	sync
	clrldi. r9,r8,40 /* get interrupt source ID. */
	beq 28f /* none there? */
	cmpwi r9,XICS_IPI /* was it an IPI? */
	bne 29f
	li r0,0xff
	li r6,XICS_MFRR
	stbcix r0,r5,r6 /* clear IPI */
	stwcix r8,r5,r7 /* EOI the interrupt */
	sync /* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld r4,HSTATE_KVM_VCPU(r13)
	cmpdi r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq kvm_no_guest
	b kvmppc_hv_entry

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b kvm_no_guest
28:	/* SRR1 said external but ICP said nope?? */
	b kvm_no_guest
29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw r8,HSTATE_SAVED_XIRR(r13)
	b kvm_no_guest

	.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr r0
	std r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li r5,3
	ld r6,VCPU_DABR(r4)
	mtspr SPRN_DABRX,r5
	mtspr SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li r3, 1
	sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
	mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
	isync
	lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
	lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
	lwz r6, VCPU_PMC + 8(r4)
	lwz r7, VCPU_PMC + 12(r4)
	lwz r8, VCPU_PMC + 16(r4)
	lwz r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz r10, VCPU_PMC + 24(r4)
	lwz r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r5
	mtspr SPRN_PMC3, r6
	mtspr SPRN_PMC4, r7
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr SPRN_PMC7, r10
	mtspr SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld r3, VCPU_MMCR(r4)
	ld r5, VCPU_MMCR + 8(r4)
	ld r6, VCPU_MMCR + 16(r4)
	mtspr SPRN_MMCR1, r5
	mtspr SPRN_MMCRA, r6
	mtspr SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl kvmppc_load_fp

	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld r5, VCPU_DSCR(r4)
	mtspr SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld r8,VCPU_DEC_EXPIRES(r4)
	mftb r7
	subf r3,r7,r8
	mtspr SPRN_DEC,r3
	stw r3,VCPU_DEC(r4)

	ld r5, VCPU_SPRG0(r4)
	ld r6, VCPU_SPRG1(r4)
	ld r7, VCPU_SPRG2(r4)
	ld r8, VCPU_SPRG3(r4)
	mtspr SPRN_SPRG0, r5
	mtspr SPRN_SPRG1, r6
	mtspr SPRN_SPRG2, r7
	mtspr SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld r3, VCPU_VPA(r4)
	cmpdi r3, 0
	beq 25f
	lwz r5, LPPACA_YIELDCOUNT(r3)
	addi r5, r5, 1
	stw r5, LPPACA_YIELDCOUNT(r3)
	li r6, 1
	stb r6, VCPU_VPA_DIRTY(r4)
25:
	/* Load up DAR and DSISR */
	ld r5, VCPU_DAR(r4)
	lwz r6, VCPU_DSISR(r4)
	mtspr SPRN_DAR, r5
	mtspr SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld r5,VCPU_AMR(r4)
	ld r6,VCPU_UAMOR(r4)
	li r7,-1
	mtspr SPRN_AMR,r5
	mtspr SPRN_UAMOR,r6
	mtspr SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li r6,0
	slbmte r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b 30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
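	/*
	 * entry_exit_count packs the entry count in bits 0-7 and the exit
	 * count in bits 8-15, so a value of 0x100 or more means some thread
	 * is already on its way out.
	 */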
	ld r5,HSTATE_KVM_VCORE(r13)
	addi r9,r5,VCORE_ENTRY_EXIT
21:	lwarx r3,0,r9
	cmpwi r3,0x100 /* any threads starting to exit? */
	bge secondary_too_late /* if so we're too late to the party */
	addi r3,r3,1
	stwcx. r3,0,r9
	bne 21b

	/* Primary thread switches to guest partition. */
	ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
	lwz r6,VCPU_PTID(r4)
	cmpwi r6,0
	bne 20f
	ld r6,KVM_SDR1(r9)
	lwz r7,KVM_LPID(r9)
	li r0,LPID_RSVD /* switch to reserved LPID */
	mtspr SPRN_LPID,r0
	ptesync
	mtspr SPRN_SDR1,r6 /* switch to partition page table */
	mtspr SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
	clrldi r7,r6,64-6 /* extract bit number (6 bits) */
	srdi r6,r6,6 /* doubleword number */
	sldi r6,r6,3 /* address offset */
	add r6,r6,r9
	addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
	li r0,1
	sld r0,r0,r7
	ld r7,0(r6)
	and. r7,r7,r0
	beq 22f
23:	ldarx r7,0,r6 /* if set, clear the bit */
	andc r7,r7,r0
	stdcx. r7,0,r6
	bne 23b
	li r6,128 /* and flush the TLB */
	mtctr r6
	li r7,0x800 /* IS field = 0b10 */
	ptesync
28:	tlbiel r7
	addi r7,r7,0x1000
	bdnz 28b
	ptesync

22:	li r0,1
	stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
	b 10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz r0,VCORE_IN_GUEST(r5)
	cmpwi r0,0
	beq 20b

	/* Set LPCR and RMOR. */
10:	ld r8,KVM_LPCR(r9)
	mtspr SPRN_LPCR,r8
	ld r8,KVM_RMOR(r9)
	mtspr SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr r3,SPRN_HDEC
	cmpwi r3,10
	li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr r9,r4
	blt hdec_soon

	/* Save purr/spurr */
	mfspr r5,SPRN_PURR
	mfspr r6,SPRN_SPURR
	std r5,HSTATE_PURR(r13)
	std r6,HSTATE_SPURR(r13)
	ld r7,VCPU_PURR(r4)
	ld r8,VCPU_SPURR(r4)
	mtspr SPRN_PURR,r7
	mtspr SPRN_SPURR,r8
	b 31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld r9,VCPU_KVM(r4) /* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld r3,toc_tlbie_lock@toc(2)
	lwz r8,PACA_LOCK_TOKEN(r13)
24:	lwarx r0,0,r3
	cmpwi r0,0
	bne 24b
	stwcx. r8,0,r3
	bne 24b
	isync

	ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
	li r0,0x18f
	rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
	or r0,r7,r0
	ptesync
	sync
	mtspr SPRN_HID4,r0 /* switch to reserved LPID */
	isync
	li r0,0
	stw r0,0(r3) /* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li r0,256
	mtctr r0
	li r6,0
25:	tlbiel r6
	addi r6,r6,0x1000
	bdnz 25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi r3,r9,KVM_TLBIE_LOCK
24:	lwarx r0,0,r3
	cmpwi r0,0
	bne 24b
	stwcx. r8,0,r3
	bne 24b
	isync
	ld r6,KVM_SDR1(r9)
	mtspr SPRN_SDR1,r6 /* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li r0,0
	stw r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr r3,SPRN_HDEC
	cmpwi r3,10
	li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr r9,r4
	blt hdec_soon

	/* Enable HDEC interrupts */
	mfspr r0,SPRN_HID0
	li r3,1
	rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr SPRN_HID0,r0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
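	/*
	 * The sync / mtspr / mfspr x 6 pattern above is the HID0 update
	 * sequence the PPC970 calls for; the dummy reads give the new
	 * HID0 value time to take effect before we continue.
	 */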
	/* Load up guest SLB entries */
31:	lwz r5,VCPU_SLB_MAX(r4)
	cmpwi r5,0
	beq 9f
	mtctr r5
	addi r6,r4,VCPU_SLB
1:	ld r8,VCPU_SLB_E(r6)
	ld r9,VCPU_SLB_V(r6)
	slbmte r9,r8
	addi r6,r6,VCPU_SLB_SIZE
	bdnz 1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz r5,VCPU_CTRL(r4)
	andi. r5,r5,1
	bne 4f
	mfspr r6,SPRN_CTRLF
	clrrdi r6,r6,1
	mtspr SPRN_CTRLT,r6
4:
	ld r6, VCPU_CTR(r4)
	lwz r7, VCPU_XER(r4)
	mtctr r6
	mtxer r7
	ld r10, VCPU_PC(r4)
	ld r11, VCPU_MSR(r4)
kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
	ld r6, VCPU_SRR0(r4)
	ld r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl r11, r11, 63 - MSR_HV_LG, 1
	rotldi r11, r11, 1 + MSR_HV_LG
	ori r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld r0,VCPU_PENDING_EXC(r4)
	lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and r0,r0,r8
	cmpdi cr1,r0,0
	andi. r0,r11,MSR_EE
	beq cr1,11f
BEGIN_FTR_SECTION
	mfspr r8,SPRN_LPCR
	ori r8,r8,LPCR_MER
	mtspr SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq 5f
	li r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr r6,r10
	mr r10,r0
	mr r7,r11
	li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
	rotldi r11,r11,63
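	/*
	 * (MSR_ME << 1) | 1 rotated right by one bit leaves a 1 in bit 63
	 * (MSR_SF) and MSR_ME back in its usual position, giving the
	 * minimal 64-bit guest MSR used when delivering the interrupt.
	 */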
	b 5f
11:	beq 5f
	mfspr r0,SPRN_DEC
	cmpwi r0,0
	li r0,BOOK3S_INTERRUPT_DECREMENTER
	blt 12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr SPRN_SRR0, r6
	mtspr SPRN_SRR1, r7

fast_guest_return:
	li r0,0
	stb r0,VCPU_CEDED(r4) /* cancel cede */
	mtspr SPRN_HSRR0,r10
	mtspr SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li r9, KVM_GUEST_MODE_GUEST
	stb r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */
BEGIN_FTR_SECTION
	ld r5, VCPU_CFAR(r4)
	mtspr SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld r5, VCPU_LR(r4)
	lwz r6, VCPU_CR(r4)
	mtlr r5
	mtcr r6

	ld r0, VCPU_GPR(R0)(r4)
	ld r1, VCPU_GPR(R1)(r4)
	ld r2, VCPU_GPR(R2)(r4)
	ld r3, VCPU_GPR(R3)(r4)
	ld r5, VCPU_GPR(R5)(r4)
	ld r6, VCPU_GPR(R6)(r4)
	ld r7, VCPU_GPR(R7)(r4)
	ld r8, VCPU_GPR(R8)(r4)
	ld r9, VCPU_GPR(R9)(r4)
	ld r10, VCPU_GPR(R10)(r4)
	ld r11, VCPU_GPR(R11)(r4)
	ld r12, VCPU_GPR(R12)(r4)
	ld r13, VCPU_GPR(R13)(r4)
	ld r4, VCPU_GPR(R4)(r4)
	hrfid
	b .

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12 = interrupt vector
	 * R13 = PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std r9, HSTATE_HOST_R2(r13)
	ld r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std r0, VCPU_GPR(R0)(r9)
	std r1, VCPU_GPR(R1)(r9)
	std r2, VCPU_GPR(R2)(r9)
	std r3, VCPU_GPR(R3)(r9)
	std r4, VCPU_GPR(R4)(r9)
	std r5, VCPU_GPR(R5)(r9)
	std r6, VCPU_GPR(R6)(r9)
	std r7, VCPU_GPR(R7)(r9)
	std r8, VCPU_GPR(R8)(r9)
	ld r0, HSTATE_HOST_R2(r13)
	std r0, VCPU_GPR(R9)(r9)
	std r10, VCPU_GPR(R10)(r9)
	std r11, VCPU_GPR(R11)(r9)
	ld r3, HSTATE_SCRATCH0(r13)
	lwz r4, HSTATE_SCRATCH1(r13)
	std r3, VCPU_GPR(R12)(r9)
	stw r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld r3, HSTATE_CFAR(r13)
	std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	/* Restore R1/R2 so we can handle faults */
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATOC(r13)

	mfspr r10, SPRN_SRR0
	mfspr r11, SPRN_SRR1
	std r10, VCPU_SRR0(r9)
	std r11, VCPU_SRR1(r9)
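	/* Trap numbers for interrupts delivered through HSRR0/1 arrive
	 * with bit 1 set; it is cleared again below once we have read
	 * the hypervisor save/restore registers instead of SRR0/1. */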
	andi. r0, r12, 2 /* need to read HSRR0/1? */
	beq 1f
	mfspr r10, SPRN_HSRR0
	mfspr r11, SPRN_HSRR1
	clrrdi r12, r12, 2
1:	std r10, VCPU_PC(r9)
	std r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr r4
	std r3, VCPU_GPR(R13)(r9)
	std r4, VCPU_LR(r9)

	/* Unset guest mode */
	li r0, KVM_GUEST_MODE_NONE
	stb r0, HSTATE_IN_GUEST(r13)

	stw r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne 11f
	mfspr r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr r3
	mfxer r4
	std r3, VCPU_CTR(r9)
	stw r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq kvmppc_hdsi
	cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne 2f
	mfspr r3,SPRN_HDEC
	cmpwi r3,0
	bge ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
	beq hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+ ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
do_ext_interrupt:
	lbz r0, HSTATE_HOST_IPI(r13)
	cmpwi r0, 0
	bne ext_interrupt_to_host

	/* Now read the interrupt from the ICP */
	ld r5, HSTATE_XICS_PHYS(r13)
	li r7, XICS_XIRR
	cmpdi r5, 0
	beq- ext_interrupt_to_host
	lwzcix r3, r5, r7
	rlwinm. r0, r3, 0, 0xffffff
	sync
	beq 3f /* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi r0, XICS_IPI
	bne ext_stash_for_host

	/* It's an IPI, clear the MFRR and EOI it */
	li r0, 0xff
	li r6, XICS_MFRR
	stbcix r0, r5, r6 /* clear the IPI */
	stwcix r3, r5, r7 /* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz r0, HSTATE_HOST_IPI(r13)
	cmpwi r0, 0
	bne- 1f
	/* All right, looks like an IPI for the guest, we need to set MER */
3:
	/* Check if any CPU is heading out to the host, if so head out too */
	ld r5, HSTATE_KVM_VCORE(r13)
	lwz r0, VCORE_ENTRY_EXIT(r5)
	cmpwi r0, 0x100
	bge ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	mfspr r8, SPRN_LPCR
	ld r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	beq 2f

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi. r0, r11, MSR_EE
	beq 2f
	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	li r10, BOOK3S_INTERRUPT_EXTERNAL
	li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
	rotldi r11, r11, 63
2:	mr r4, r9
	mtspr SPRN_LPCR, r8
	b fast_guest_return

	/* We raced with the host, we need to resend that IPI, bummer */
1:	li r0, IPI_PRIORITY
	stbcix r0, r5, r6 /* set the IPI */
	sync
	b ext_interrupt_to_host

ext_stash_for_host:
	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw r3, HSTATE_SAVED_XIRR(r13)
ext_interrupt_to_host:

guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr r5,SPRN_DEC
	mftb r6
	extsw r5,r5
	add r5,r5,r6
	std r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state */
	mfdar r6
	mfdsisr r7
	std r6, VCPU_DAR(r9)
	stw r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq 6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std r6, VCPU_FAULT_DAR(r9)
	stw r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr r6,SPRN_CTRLF
	stw r6,VCPU_CTRL(r9)
	andi. r0,r6,1
	bne 4f
	ori r6,r6,1
	mtspr SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
	mtctr r0
	li r6,0
	addi r7,r9,VCPU_SLB
	li r5,0
1:	slbmfee r8,r6
	andis. r0,r8,SLB_ESID_V@h
	beq 2f
	add r8,r8,r6 /* put index in */
	slbmfev r3,r6
	std r8,VCPU_SLB_E(r7)
	std r3,VCPU_SLB_V(r7)
	addi r7,r7,VCPU_SLB_SIZE
	addi r5,r5,1
2:	addi r6,r6,1
	bdnz 1b
	stw r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr r5,SPRN_PURR
	mfspr r6,SPRN_SPURR
	ld r7,VCPU_PURR(r9)
	ld r8,VCPU_SPURR(r9)
	std r5,VCPU_PURR(r9)
	std r6,VCPU_SPURR(r9)
	subf r5,r7,r5
	subf r6,r8,r6
	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld r3,HSTATE_PURR(r13)
	ld r4,HSTATE_SPURR(r13)
	add r3,r3,r5
	add r4,r4,r6
	mtspr SPRN_PURR,r3
	mtspr SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li r5,0
	slbmte r5,r5
	slbia
	ptesync

hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b 32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld r5,HSTATE_KVM_VCORE(r13)
	addi r6,r5,VCORE_ENTRY_EXIT
41:	lwarx r3,0,r6
	addi r0,r3,0x100
	stwcx. r0,0,r6
	bne 41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi r3,0x100 /* Are we the first here? */
	bge 43f
	cmpwi r3,1 /* Are any other threads in the guest? */
	ble 43f
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq 40f
	li r0,0
	mtspr SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz r3,VCORE_NAPPING_THREADS(r5)
	lwz r4,VCPU_PTID(r9)
	li r0,1
	sld r0,r0,r4
	andc. r3,r3,r0 /* no sense IPI'ing ourselves */
	beq 43f
	mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
	subf r6,r4,r13
42:	andi. r0,r3,1
	beq 44f
	ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
	li r0,IPI_PRIORITY
	li r7,XICS_MFRR
	stbcix r0,r7,r8 /* trigger the IPI */
44:	srdi. r3,r3,1
	addi r6,r6,PACA_SIZE
	bne 42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
	ld r5,HSTATE_KVM_VCORE(r13)
	lwz r3,VCPU_PTID(r9)
	cmpwi r3,0
	beq 15f
	HMT_LOW
13:	lbz r3,VCORE_IN_GUEST(r5)
	cmpwi r3,0
	bne 13b
	HMT_MEDIUM
	b 16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz r3,VCORE_ENTRY_EXIT(r5)
	srwi r0,r3,8
	clrldi r3,r3,56
	cmpw r3,r0
	bne 15b
	isync

	/* Primary thread switches back to host partition */
	ld r6,KVM_HOST_SDR1(r4)
	lwz r7,KVM_HOST_LPID(r4)
	li r8,LPID_RSVD /* switch to reserved LPID */
	mtspr SPRN_LPID,r8
	ptesync
	mtspr SPRN_SDR1,r6 /* switch to partition page table */
	mtspr SPRN_LPID,r7
	isync
	li r0,0
	stb r0,VCORE_IN_GUEST(r5)
	lis r8,0x7fff /* MAX_INT@h */
	mtspr SPRN_HDEC,r8
16:	ld r8,KVM_HOST_LPCR(r4)
	mtspr SPRN_LPCR,r8
	isync
	b 33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld r4,VCPU_KVM(r9) /* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz r8,PACA_LOCK_TOKEN(r13)
	addi r3,r4,KVM_TLBIE_LOCK
24:	lwarx r0,0,r3
	cmpwi r0,0
	bne 24b
	stwcx. r8,0,r3
	bne 24b
	isync

	ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
	li r0,0x18f
	rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
	or r0,r7,r0
	ptesync
	sync
	mtspr SPRN_HID4,r0 /* switch to reserved LPID */
	isync
	li r0,0
	stw r0,0(r3) /* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li r0,256
	mtctr r0
	li r6,0
25:	tlbiel r6
	addi r6,r6,0x1000
	bdnz 25b
	ptesync

	/* take native_tlbie_lock */
	ld r3,toc_tlbie_lock@toc(2)
24:	lwarx r0,0,r3
	cmpwi r0,0
	bne 24b
	stwcx. r8,0,r3
	bne 24b
	isync

	ld r6,KVM_HOST_SDR1(r4)
	mtspr SPRN_SDR1,r6 /* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr SPRN_HID4,r7
	isync
	li r0,0
	stw r0,0(r3) /* drop native_tlbie_lock */

	lis r8,0x7fff /* MAX_INT@h */
	mtspr SPRN_HDEC,r8

	/* Disable HDEC interrupts */
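	/* (same mfspr-x-6 HID0 update sequence as at guest entry) */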
	mfspr r0,SPRN_HID0
	li r3,0
	rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr SPRN_HID0,r0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0
	mfspr r0,SPRN_HID0

	/* load host SLB entries */
33:	ld r8,PACA_SLBSHADOWPTR(r13)
	.rept SLB_NUM_BOLTED
	ld r5,SLBSHADOW_SAVEAREA(r8)
	ld r6,SLBSHADOW_SAVEAREA+8(r8)
	andis. r7,r5,SLB_ESID_V@h
	beq 1f
	slbmte r6,r5
1:	addi r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr r5,SPRN_AMR
	mfspr r6,SPRN_UAMOR
	std r5,VCPU_AMR(r9)
	std r6,VCPU_UAMOR(r9)
	li r6,0
	mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr r8, SPRN_DSCR
	ld r7, HSTATE_DSCR(r13)
	std r8, VCPU_DSCR(r9)
	mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r9)
	std r15, VCPU_GPR(R15)(r9)
	std r16, VCPU_GPR(R16)(r9)
	std r17, VCPU_GPR(R17)(r9)
	std r18, VCPU_GPR(R18)(r9)
	std r19, VCPU_GPR(R19)(r9)
	std r20, VCPU_GPR(R20)(r9)
	std r21, VCPU_GPR(R21)(r9)
	std r22, VCPU_GPR(R22)(r9)
	std r23, VCPU_GPR(R23)(r9)
	std r24, VCPU_GPR(R24)(r9)
	std r25, VCPU_GPR(R25)(r9)
	std r26, VCPU_GPR(R26)(r9)
	std r27, VCPU_GPR(R27)(r9)
	std r28, VCPU_GPR(R28)(r9)
	std r29, VCPU_GPR(R29)(r9)
	std r30, VCPU_GPR(R30)(r9)
	std r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr r3, SPRN_SPRG0
	mfspr r4, SPRN_SPRG1
	mfspr r5, SPRN_SPRG2
	mfspr r6, SPRN_SPRG3
	std r3, VCPU_SPRG0(r9)
	std r4, VCPU_SPRG1(r9)
	std r5, VCPU_SPRG2(r9)
	std r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr r3, r9
	bl .kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld r8, VCPU_VPA(r9) /* do they have a VPA? */
	cmpdi r8, 0
	beq 25f
	lwz r3, LPPACA_YIELDCOUNT(r8)
	addi r3, r3, 1
	stw r3, LPPACA_YIELDCOUNT(r8)
	li r3, 1
	stb r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li r3, 1
	sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
	mfspr r4, SPRN_MMCR0 /* save MMCR0 */
	mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
	mfspr r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li r7, 0
	mtspr SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq 21f /* if no VPA, save PMU stuff anyway */
	lbz r7, LPPACA_PMCINUSE(r8)
	cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
	bne 21f
	std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
	b 22f
21:	mfspr r5, SPRN_MMCR1
	std r4, VCPU_MMCR(r9)
	std r5, VCPU_MMCR + 8(r9)
	std r6, VCPU_MMCR + 16(r9)
	mfspr r3, SPRN_PMC1
	mfspr r4, SPRN_PMC2
	mfspr r5, SPRN_PMC3
	mfspr r6, SPRN_PMC4
	mfspr r7, SPRN_PMC5
	mfspr r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr r10, SPRN_PMC7
	mfspr r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw r3, VCPU_PMC(r9)
	stw r4, VCPU_PMC + 4(r9)
	stw r5, VCPU_PMC + 8(r9)
	stw r6, VCPU_PMC + 12(r9)
	stw r7, VCPU_PMC + 16(r9)
	stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw r10, VCPU_PMC + 24(r9)
	stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz r0,VCPU_PTID(r9)
	cmpwi r0,0
	bne secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld r5,HSTATE_DABR(r13)
	li r6,7
	mtspr SPRN_DABR,r5
	mtspr SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld r3,PACA_SPRG3(r13)
	mtspr SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld r3, HSTATE_DECEXP(r13)
	mftb r4
	subf r4, r4, r3
	mtspr SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
	lbz r4, LPPACA_PMCINUSE(r3)
	cmpwi r4, 0
	beq 23f /* skip if not */
	lwz r3, HSTATE_PMC(r13)
	lwz r4, HSTATE_PMC + 4(r13)
	lwz r5, HSTATE_PMC + 8(r13)
	lwz r6, HSTATE_PMC + 12(r13)
	lwz r8, HSTATE_PMC + 16(r13)
	lwz r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz r10, HSTATE_PMC + 24(r13)
	lwz r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r4
	mtspr SPRN_PMC3, r5
	mtspr SPRN_PMC4, r6
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr SPRN_PMC7, r10
	mtspr SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld r3, HSTATE_MMCR(r13)
	ld r4, HSTATE_MMCR + 8(r13)
	ld r5, HSTATE_MMCR + 16(r13)
	mtspr SPRN_MMCR1, r4
	mtspr SPRN_MMCRA, r5
	mtspr SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld r8, HSTATE_VMHANDLER(r13)
	ld r7, HSTATE_HOST_MSR(r13)

	cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq 11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr r6
	li r0, MSR_RI
	andc r6, r6, r0
	mtmsrd r6, 1 /* Clear RI in MSR */
	mtsrr0 r8
	mtsrr1 r7
	beqa 0x500 /* external interrupt (PPC970) */
	beq cr1, 13f /* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr SPRN_HSRR0, r8
	mtspr SPRN_HSRR1, r7
	ba 0x500

13:	b machine_check_fwnmi

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr r4, SPRN_HDAR
	mfspr r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq 1f /* if not, send it to the guest */
	andi. r0, r11, MSR_DR /* data relocation enabled? */
	beq 3f
	clrrdi r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
	bne 1f /* if no SLB entry found */
4:	std r4, VCPU_FAULT_DAR(r9)
	stw r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr r3, r9 /* vcpu pointer */
	li r7, 1 /* data fault */
	bl .kvmppc_hpte_hv_fault
	ld r9, HSTATE_KVM_VCPU(r13)
	ld r10, VCPU_PC(r9)
	ld r11, VCPU_MSR(r9)
	li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi r3, 0 /* retry the instruction */
	beq 6f
	cmpdi r3, -1 /* handle in kernel mode */
	beq guest_exit_cont
	cmpdi r3, -2 /* MMIO emulation; need instr word */
	beq 2f

	/* Synthesize a DSI for the guest */
	ld r4, VCPU_FAULT_DAR(r9)
	mr r6, r3
1:	mtspr SPRN_DAR, r4
	mtspr SPRN_DSISR, r6
	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	li r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
	rotldi r11, r11, 63
fast_interrupt_c_return:
6:	ld r7, VCPU_CTR(r9)
	lwz r8, VCPU_XER(r9)
	mtctr r7
	mtxer r8
	mr r4, r9
	b fast_guest_return

3:	ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
	ld r5, KVM_VRMA_SLB_V(r5)
	b 4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li r0, KVM_GUEST_MODE_SKIP
	stb r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr r3
	ori r4, r3, MSR_DR /* Enable paging for data */
	mtmsrd r4
	lwz r8, 0(r10)
	mtmsrd r3

	/* Store the result */
	stw r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li r0, KVM_GUEST_MODE_NONE
	stb r0, HSTATE_IN_GUEST(r13)
	b guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis. r0, r11, SRR1_ISI_NOPT@h
	beq 1f
	andi. r0, r11, MSR_IR /* instruction relocation enabled? */
	beq 3f
	clrrdi r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
	bne 1f /* if no SLB entry found */
4:
	/* Search the hash table. */
	mr r3, r9 /* vcpu pointer */
	mr r4, r10
	mr r6, r11
	li r7, 0 /* instruction fault */
	bl .kvmppc_hpte_hv_fault
	ld r9, HSTATE_KVM_VCPU(r13)
	ld r10, VCPU_PC(r9)
	ld r11, VCPU_MSR(r9)
	li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi r3, 0 /* retry the instruction */
	beq fast_interrupt_c_return
	cmpdi r3, -1 /* handle in kernel mode */
	beq guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr r11, r3
1:	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	li r10, BOOK3S_INTERRUPT_INST_STORAGE
	li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
	rotldi r11, r11, 63
	b fast_interrupt_c_return

3:	ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
	ld r5, KVM_VRMA_SLB_V(r6)
	b 4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl hcall_try_real_mode
hcall_try_real_mode:
	ld r3,VCPU_GPR(R3)(r9)
	andi. r0,r11,MSR_PR
	bne guest_exit_cont
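	/* Hcall numbers are multiples of 4, so the hcall number (with the
	 * low two bits cleared) is also the byte offset of the matching
	 * 32-bit entry in hcall_real_table; each entry holds the handler's
	 * offset from the start of the table, or 0 if there is no
	 * real-mode handler. */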
	clrrdi r3,r3,2
	cmpldi r3,hcall_real_table_end - hcall_real_table
	bge guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx r3,r3,r4
	cmpwi r3,0
	beq guest_exit_cont
	add r3,r3,r4
	mtctr r3
	mr r3,r9 /* get vcpu pointer */
	ld r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi r3,H_TOO_HARD
	beq hcall_real_fallback
	ld r4,HSTATE_KVM_VCPU(r13)
	std r3,VCPU_GPR(R3)(r4)
	ld r10,VCPU_PC(r4)
	ld r11,VCPU_MSR(r4)
	b fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li r12,BOOK3S_INTERRUPT_SYSCALL
	ld r9, HSTATE_KVM_VCPU(r13)
	b guest_exit_cont

	.globl hcall_real_table
hcall_real_table:
	.long 0 /* 0 - unused */
	.long .kvmppc_h_remove - hcall_real_table
	.long .kvmppc_h_enter - hcall_real_table
	.long .kvmppc_h_read - hcall_real_table
	.long 0 /* 0x10 - H_CLEAR_MOD */
	.long 0 /* 0x14 - H_CLEAR_REF */
	.long .kvmppc_h_protect - hcall_real_table
	.long 0 /* 0x1c - H_GET_TCE */
	.long .kvmppc_h_put_tce - hcall_real_table
	.long 0 /* 0x24 - H_SET_SPRG0 */
	.long .kvmppc_h_set_dabr - hcall_real_table
	.long 0 /* 0x2c */
	.long 0 /* 0x30 */
	.long 0 /* 0x34 */
	.long 0 /* 0x38 */
	.long 0 /* 0x3c */
	.long 0 /* 0x40 */
	.long 0 /* 0x44 */
	.long 0 /* 0x48 */
	.long 0 /* 0x4c */
	.long 0 /* 0x50 */
	.long 0 /* 0x54 */
	.long 0 /* 0x58 */
	.long 0 /* 0x5c */
	.long 0 /* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long .kvmppc_rm_h_eoi - hcall_real_table
	.long .kvmppc_rm_h_cppr - hcall_real_table
	.long .kvmppc_rm_h_ipi - hcall_real_table
	.long 0 /* 0x70 - H_IPOLL */
	.long .kvmppc_rm_h_xirr - hcall_real_table
#else
	.long 0 /* 0x64 - H_EOI */
	.long 0 /* 0x68 - H_CPPR */
	.long 0 /* 0x6c - H_IPI */
	.long 0 /* 0x70 - H_IPOLL */
	.long 0 /* 0x74 - H_XIRR */
#endif
	.long 0 /* 0x78 */
	.long 0 /* 0x7c */
	.long 0 /* 0x80 */
	.long 0 /* 0x84 */
	.long 0 /* 0x88 */
	.long 0 /* 0x8c */
	.long 0 /* 0x90 */
	.long 0 /* 0x94 */
	.long 0 /* 0x98 */
	.long 0 /* 0x9c */
	.long 0 /* 0xa0 */
	.long 0 /* 0xa4 */
	.long 0 /* 0xa8 */
	.long 0 /* 0xac */
	.long 0 /* 0xb0 */
	.long 0 /* 0xb4 */
	.long 0 /* 0xb8 */
	.long 0 /* 0xbc */
	.long 0 /* 0xc0 */
	.long 0 /* 0xc4 */
	.long 0 /* 0xc8 */
	.long 0 /* 0xcc */
	.long 0 /* 0xd0 */
	.long 0 /* 0xd4 */
	.long 0 /* 0xd8 */
	.long 0 /* 0xdc */
	.long .kvmppc_h_cede - hcall_real_table
	.long 0 /* 0xe4 */
	.long 0 /* 0xe8 */
	.long 0 /* 0xec */
	.long 0 /* 0xf0 */
	.long 0 /* 0xf4 */
	.long 0 /* 0xf8 */
	.long 0 /* 0xfc */
	.long 0 /* 0x100 */
	.long 0 /* 0x104 */
	.long 0 /* 0x108 */
	.long 0 /* 0x10c */
	.long 0 /* 0x110 */
	.long 0 /* 0x114 */
	.long 0 /* 0x118 */
	.long 0 /* 0x11c */
	.long 0 /* 0x120 */
	.long .kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr r4,r9
	b fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr SPRN_DABR,r4
	mfspr r5, SPRN_DABR
	cmpd r4, r5
	bne 1b
	isync
	li r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori r11,r11,MSR_EE
	std r11,VCPU_MSR(r3)
	li r0,1
	stb r0,VCPU_CEDED(r3)
	sync /* order setting ceded vs. testing prodded */
	lbz r5,VCPU_PRODDED(r3)
	cmpwi r5,0
	bne kvm_cede_prodded
	li r0,0 /* set trap to 0 to say hcall is handled */
	stw r0,VCPU_TRAP(r3)
	li r0,H_SUCCESS
	std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b kvm_cede_exit /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld r5,HSTATE_KVM_VCORE(r13)
	lwz r6,VCPU_PTID(r3)
	lwz r8,VCORE_ENTRY_EXIT(r5)
	clrldi r8,r8,56
	li r0,1
	sld r0,r0,r6
	addi r6,r5,VCORE_NAPPING_THREADS
31:	lwarx r4,0,r6
	or r4,r4,r0
	PPC_POPCNTW(R7,R4)
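	/* r7 = number of threads now napping (including us); if every
	 * thread that entered the guest is napping, give the hcall to
	 * the host instead of napping ourselves. */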
	cmpw r7,r8
	bge kvm_cede_exit
	stwcx. r4,0,r6
	bne 31b
	li r0,1
	stb r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr r4,r3
	lwz r7,VCORE_ENTRY_EXIT(r5)
	cmpwi r7,0x100
	bge 33f /* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r3)
	std r15, VCPU_GPR(R15)(r3)
	std r16, VCPU_GPR(R16)(r3)
	std r17, VCPU_GPR(R17)(r3)
	std r18, VCPU_GPR(R18)(r3)
	std r19, VCPU_GPR(R19)(r3)
	std r20, VCPU_GPR(R20)(r3)
	std r21, VCPU_GPR(R21)(r3)
	std r22, VCPU_GPR(R22)(r3)
	std r23, VCPU_GPR(R23)(r3)
	std r24, VCPU_GPR(R24)(r3)
	std r25, VCPU_GPR(R25)(r3)
	std r26, VCPU_GPR(R26)(r3)
	std r27, VCPU_GPR(R27)(r3)
	std r28, VCPU_GPR(R28)(r3)
	std r29, VCPU_GPR(R29)(r3)
	std r30, VCPU_GPR(R30)(r3)
	std r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl .kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li r0,1
	stb r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr r5,SPRN_LPCR
	ori r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr SPRN_LPCR,r5
	isync
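	/*
	 * The dummy store, ptesync and load back below appear to be there
	 * to make sure our updates (napping_threads, HSTATE_HWTHREAD_REQ)
	 * are visible to the other threads before we nap; the compare of
	 * r0 with itself always succeeds, so the loop never repeats.
	 */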
	li r0, 0
	std r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld r0, HSTATE_SCRATCH0(r13)
1:	cmpd r0, r0
	bne 1b
	nap
	b .

kvm_end_cede:
	/* get vcpu pointer */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl kvmppc_load_fp

	/* Load NV GPRS */
	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld r5,HSTATE_KVM_VCORE(r13)
	lwz r3,VCPU_PTID(r4)
	li r0,1
	sld r0,r0,r3
	addi r6,r5,VCORE_NAPPING_THREADS
32:	lwarx r7,0,r6
	andc r7,r7,r0
	stwcx. r7,0,r6
	bne 32b
	li r0,0
	stb r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr r3, SPRN_SRR1
	rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
	cmpwi r3, 4 /* was it an external interrupt? */
	li r12, BOOK3S_INTERRUPT_EXTERNAL
	mr r9, r4
	ld r10, VCPU_PC(r9)
	ld r11, VCPU_MSR(r9)
	beq do_ext_interrupt /* if so */

	/* see if any other thread is already exiting */
	lwz r0,VCORE_ENTRY_EXIT(r5)
	cmpwi r0,0x100
	blt kvmppc_cede_reentry /* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li r0,0
	stb r0,VCPU_PRODDED(r3)
	sync /* order testing prodded vs. clearing ceded */
	stb r0,VCPU_CEDED(r3)
	li r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr r3, r9 /* get vcpu pointer */
	bl .kvmppc_realmode_machine_check
	nop
	cmpdi r3, 0 /* continue exiting from guest? */
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
	rotldi r11, r11, 63
	b fast_interrupt_c_return

secondary_too_late:
	ld r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz r3,VCORE_IN_GUEST(r5)
	cmpwi r3,0
	bne 13b
	HMT_MEDIUM
	ld r11,PACA_SLBSHADOWPTR(r13)
	.rept SLB_NUM_BOLTED
	ld r5,SLBSHADOW_SAVEAREA(r11)
	ld r6,SLBSHADOW_SAVEAREA+8(r11)
	andis. r7,r5,SLB_ESID_V@h
	beq 1f
	slbmte r6,r5
1:	addi r11,r11,16
	.endr

secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li r0, 0
	std r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld r5, HSTATE_XICS_PHYS(r13)
	li r7, XICS_XIRR
	lwzcix r3, r5, r7 /* ack any pending interrupt */
	rlwinm. r0, r3, 0, 0xffffff /* any pending? */
	beq 37f
	sync
	li r0, 0xff
	li r6, XICS_MFRR
	stbcix r0, r5, r6 /* clear the IPI */
	stwcix r3, r5, r7 /* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld r4, HSTATE_KVM_VCORE(r13)
	addi r4, r4, VCORE_NAP_COUNT
	lwsync /* make previous updates visible */
51:	lwarx r3, 0, r4
	addi r3, r3, 1
	stwcx. r3, 0, r4
	bne 51b

kvm_no_guest:
	li r0, KVM_HWTHREAD_IN_NAP
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	li r3, LPCR_PECE0
	mfspr r4, SPRN_LPCR
	rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr SPRN_LPCR, r4
	isync
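	/* Same store/ptesync/load-back sequence as in the cede path above,
	 * making sure our state is visible before we nap. */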
	std r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld r0, HSTATE_SCRATCH0(r13)
1:	cmpd r0, r0
	bne 1b
	nap
	b .

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr r5
	ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd r8
	isync
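	/*
	 * reg is an assembler-time symbol, so each .rept block below
	 * unrolls into 32 stores, one per FP/VSX register.
	 */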
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept 32
	li r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept 32
	stfd reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs fr0
	stfd fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept 32
	li r6,reg*16+VCPU_VRS
	stvx reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr vr0
	li r6,VCPU_VSCR
	stvx vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr r6,SPRN_VRSAVE
	stw r6,VCPU_VRSAVE(r3)
	mtmsrd r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl kvmppc_load_fp
kvmppc_load_fp:
	mfmsr r9
	ori r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd r8
	isync
	lfd fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept 32
	li r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept 32
	lfd reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li r7,VCPU_VSCR
	lvx vr0,r7,r4
	mtvscr vr0
	reg = 0
	.rept 32
	li r7,reg*16+VCPU_VRS
	lvx reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz r7,VCPU_VRSAVE(r4)
	mtspr SPRN_VRSAVE,r7
	blr