book3s_hv_rmhandlers.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
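
/*
 * The two stubs below are branched to when we want to skip over the
 * instruction that caused a (hypervisor) interrupt: they advance
 * SRR0/HSRR0 past it, reload r13 from the scratch SPR and return
 * with rfid/hrfid.
 */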
.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_handler_trampoline_enter in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
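	/* r6 = MSR with relocation (IR/DR) off; the RFI below therefore
	   lands in kvmppc_hv_entry in real mode */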
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:
	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
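	/* LR holds the address to return to once the guest exits; stash it
	   so the exit path can get back to the virtual-mode handler. */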
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/*
	 * Set the decrementer to the guest decrementer.
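	 * VCPU_DEC_EXPIRES holds the timebase value at which the guest
	 * DEC reaches zero, so DEC is loaded with (expiry - current timebase).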
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/* Switch to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	ld	r8,VCPU_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
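	 * (tlbiel only affects the TLB of the core it executes on, so stale
	 * entries can be left behind if this vcpu last ran on a different
	 * core, or if a different vcpu of this guest last ran on this core.)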
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
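
	/*
	 * fast_guest_return enters the guest with PC in r10 and MSR in r11;
	 * MSR_HV has been cleared and MSR_ME forced on above.
	 */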
fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */
	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)
	ld	r4, VCPU_GPR(r4)(r4)
	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
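	/* (HDEC still >= 0 means it fired for a stale value; go straight
	   back into the guest) */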
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:

	/* Check for mediated interrupts (could be done earlier really ...) */
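	/* If the guest had MSR_EE set and LPCR[MER] was set, this external
	   interrupt is meant for the guest; bounce it in as a 0x500. */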
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_LPCR(r9)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
	/* Save DEC */
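	/* (convert the remaining ticks back into an expiry timebase value) */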
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)

	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync
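
	/*
	 * hdec_soon is also reached from the entry path when HDEC is about
	 * to expire, with r9 = vcpu pointer and
	 * r12 = BOOK3S_INTERRUPT_HV_DECREMENTER already set.
	 */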
hdec_soon:
	/* Switch back to host partition */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
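	/* (only the bolted entries are restored from the SLB shadow buffer;
	   the others are faulted back in on demand) */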
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Save PMU registers */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b
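
/* A leftover HDEC interrupt: just go straight back into the guest. */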
ignore_hdec:
	mr	r4,r9
	b	fast_guest_return
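
/*
 * Deliver an external interrupt to the guest: save its PC/MSR in
 * SRR0/SRR1, then re-enter it at vector 0x500 with a fresh MSR
 * (SF | ME) via fast_guest_return.
 */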
bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
	b	fast_guest_return

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
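	/* Enable FP (plus VMX/VSX where the CPU has them) in the MSR so the
	   registers can be accessed; the original MSR is restored below. */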
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	stxvd2x	reg,r6,r3
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	lxvd2x	reg,r7,r4
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr