booke_interrupts.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
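
/* The resulting frame, as word offsets from r1:
 *   0: back chain (stwu), 4: callee LR save, 8: kvm_run pointer,
 *   12: host r2, 16: host CR, 20 and up: host r14-r31,
 * rounded up so HOST_STACK_SIZE stays 16-byte aligned. */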
/* Exit types for which the faulting instruction, DEAR, or ESR must be
 * captured before the host can take an exception and clobber them. */
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS))

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
        /* Get pointer to vcpu and record exit number. */
        mtspr   \scratch, r4            /* stash guest r4 so it can hold the vcpu */
        mfspr   r4, SPRN_SPRG_THREAD
        lwz     r4, THREAD_KVM_VCPU(r4)
        stw     r3, VCPU_GPR(R3)(r4)
        stw     r5, VCPU_GPR(R5)(r4)
        stw     r6, VCPU_GPR(R6)(r4)
        mfspr   r3, \scratch
        mfctr   r5
        stw     r3, VCPU_GPR(R4)(r4)    /* guest r4, recovered from \scratch */
        stw     r5, VCPU_CTR(r4)
        mfspr   r3, \srr0
        lis     r6, kvmppc_resume_host@h
        stw     r3, VCPU_PC(r4)
        li      r5, \ivor_nr
        ori     r6, r6, kvmppc_resume_host@l
        mtctr   r6
        bctr
.endm
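
/* One stub per interrupt type.  While a guest runs, IVPR is pointed at a
 * copy of these stubs (see lightweight_exit below), so every exception
 * taken in guest context funnels into kvmppc_resume_host.  The critical,
 * machine check, watchdog and debug interrupts have their own save/restore
 * registers, so they pass CSRR0/MCSRR0 and a matching scratch SPRG. */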
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0

_GLOBAL(kvmppc_handler_len)
        .long   kvmppc_handler_1 - kvmppc_handler_0
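
/* All stubs assemble to the same length, so the distance between the
 * first two gives the size of one handler.  (Presumably consumed by the
 * host setup code when it copies the stubs into place.) */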

/* Registers:
 * SPRG_SCRATCH0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        mfcr    r3
        stw     r3, VCPU_CR(r4)
        stw     r7, VCPU_GPR(R7)(r4)
        stw     r8, VCPU_GPR(R8)(r4)
        stw     r9, VCPU_GPR(R9)(r4)
        li      r6, 1
        slw     r6, r6, r5              /* r6 = 1 << exit number, for the NEED_* tests */

#ifdef CONFIG_KVM_EXIT_TIMING
        /* Save exit time: read TBU twice around TBL and retry if the upper
         * half ticked over, so the two halves form a consistent sample. */
1:
        mfspr   r7, SPRN_TBRU
        mfspr   r8, SPRN_TBRL
        mfspr   r9, SPRN_TBRU
        cmpw    r9, r7
        bne     1b
        stw     r8, VCPU_TIMING_EXIT_TBL(r4)
        stw     r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        /* Save the faulting instruction and all GPRs for emulation.  The
         * guest is run in the translated address space (IS/DS=1), so flip
         * MSR[DS] around the load so it goes through the guest's mappings. */
        andi.   r7, r6, NEED_INST_MASK
        beq     ..skip_inst_copy
        mfspr   r9, SPRN_SRR0
        mfmsr   r8
        ori     r7, r8, MSR_DS
        mtmsr   r7
        isync
        lwz     r9, 0(r9)
        mtmsr   r8
        isync
        stw     r9, VCPU_LAST_INST(r4)

        stw     r15, VCPU_GPR(R15)(r4)
        stw     r16, VCPU_GPR(R16)(r4)
        stw     r17, VCPU_GPR(R17)(r4)
        stw     r18, VCPU_GPR(R18)(r4)
        stw     r19, VCPU_GPR(R19)(r4)
        stw     r20, VCPU_GPR(R20)(r4)
        stw     r21, VCPU_GPR(R21)(r4)
        stw     r22, VCPU_GPR(R22)(r4)
        stw     r23, VCPU_GPR(R23)(r4)
        stw     r24, VCPU_GPR(R24)(r4)
        stw     r25, VCPU_GPR(R25)(r4)
        stw     r26, VCPU_GPR(R26)(r4)
        stw     r27, VCPU_GPR(R27)(r4)
        stw     r28, VCPU_GPR(R28)(r4)
        stw     r29, VCPU_GPR(R29)(r4)
        stw     r30, VCPU_GPR(R30)(r4)
        stw     r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */
        andi.   r7, r6, NEED_DEAR_MASK
        beq     ..skip_dear
        mfspr   r9, SPRN_DEAR
        stw     r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi.   r7, r6, NEED_ESR_MASK
        beq     ..skip_esr
        mfspr   r9, SPRN_ESR
        stw     r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw     r0, VCPU_GPR(R0)(r4)
        stw     r1, VCPU_GPR(R1)(r4)
        stw     r2, VCPU_GPR(R2)(r4)
        stw     r10, VCPU_GPR(R10)(r4)
        stw     r11, VCPU_GPR(R11)(r4)
        stw     r12, VCPU_GPR(R12)(r4)
        stw     r13, VCPU_GPR(R13)(r4)
        stw     r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
        mflr    r3
        stw     r3, VCPU_LR(r4)
        mfxer   r3
        stw     r3, VCPU_XER(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz     r1, VCPU_HOST_STACK(r4)
        lwz     r3, VCPU_HOST_PID(r4)
        mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        /* We cheat and know that Linux doesn't use PID1, which is always 0. */
        lis     r3, 0
        mtspr   SPRN_PID1, r3
#endif

        /* Restore host IVPR before re-enabling interrupts.  We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis     r3, 0xc000
        mtspr   SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr   r3
        lwz     r3, HOST_RUN(r1)
        lwz     r2, HOST_R2(r1)
        mr      r14, r4 /* Save vcpu pointer. */
        bctrl   /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr      r4, r14
        lwz     r14, VCPU_GPR(R14)(r4)
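
/* kvmppc_handle_exit() returns a RESUME_* code.  The low two bits are
 * flags: RESUME_FLAG_NV asks us to reload all nonvolatile GPRs, and
 * RESUME_FLAG_HOST means return to kvm_vcpu_run() instead of the guest;
 * in that case the upper bits carry the (negative) host return value. */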
        /* Sometimes instruction emulation must restore complete GPR state. */
        andi.   r5, r3, RESUME_FLAG_NV
        beq     ..skip_nv_load
        lwz     r15, VCPU_GPR(R15)(r4)
        lwz     r16, VCPU_GPR(R16)(r4)
        lwz     r17, VCPU_GPR(R17)(r4)
        lwz     r18, VCPU_GPR(R18)(r4)
        lwz     r19, VCPU_GPR(R19)(r4)
        lwz     r20, VCPU_GPR(R20)(r4)
        lwz     r21, VCPU_GPR(R21)(r4)
        lwz     r22, VCPU_GPR(R22)(r4)
        lwz     r23, VCPU_GPR(R23)(r4)
        lwz     r24, VCPU_GPR(R24)(r4)
        lwz     r25, VCPU_GPR(R25)(r4)
        lwz     r26, VCPU_GPR(R26)(r4)
        lwz     r27, VCPU_GPR(R27)(r4)
        lwz     r28, VCPU_GPR(R28)(r4)
        lwz     r29, VCPU_GPR(R29)(r4)
        lwz     r30, VCPU_GPR(R30)(r4)
        lwz     r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

        /* Should we return to the guest? */
        andi.   r5, r3, RESUME_FLAG_HOST
        beq     lightweight_exit

        srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */
#ifdef CONFIG_SPE
        /* Save guest SPEFSCR and load host SPEFSCR. */
        mfspr   r9, SPRN_SPEFSCR
        stw     r9, VCPU_SPEFSCR(r4)
        lwz     r9, VCPU_HOST_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r9
#endif

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(R15)(r4)
        stw     r16, VCPU_GPR(R16)(r4)
        stw     r17, VCPU_GPR(R17)(r4)
        stw     r18, VCPU_GPR(R18)(r4)
        stw     r19, VCPU_GPR(R19)(r4)
        stw     r20, VCPU_GPR(R20)(r4)
        stw     r21, VCPU_GPR(R21)(r4)
        stw     r22, VCPU_GPR(R22)(r4)
        stw     r23, VCPU_GPR(R23)(r4)
        stw     r24, VCPU_GPR(R24)(r4)
        stw     r25, VCPU_GPR(R25)(r4)
        stw     r26, VCPU_GPR(R26)(r4)
        stw     r27, VCPU_GPR(R27)(r4)
        stw     r28, VCPU_GPR(R28)(r4)
        stw     r29, VCPU_GPR(R29)(r4)
        stw     r30, VCPU_GPR(R30)(r4)
        stw     r31, VCPU_GPR(R31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz     r14, HOST_NV_GPR(R14)(r1)
        lwz     r15, HOST_NV_GPR(R15)(r1)
        lwz     r16, HOST_NV_GPR(R16)(r1)
        lwz     r17, HOST_NV_GPR(R17)(r1)
        lwz     r18, HOST_NV_GPR(R18)(r1)
        lwz     r19, HOST_NV_GPR(R19)(r1)
        lwz     r20, HOST_NV_GPR(R20)(r1)
        lwz     r21, HOST_NV_GPR(R21)(r1)
        lwz     r22, HOST_NV_GPR(R22)(r1)
        lwz     r23, HOST_NV_GPR(R23)(r1)
        lwz     r24, HOST_NV_GPR(R24)(r1)
        lwz     r25, HOST_NV_GPR(R25)(r1)
        lwz     r26, HOST_NV_GPR(R26)(r1)
        lwz     r27, HOST_NV_GPR(R27)(r1)
        lwz     r28, HOST_NV_GPR(R28)(r1)
        lwz     r29, HOST_NV_GPR(R29)(r1)
        lwz     r30, HOST_NV_GPR(R30)(r1)
        lwz     r31, HOST_NV_GPR(R31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz     r4, HOST_STACK_LR(r1)
        lwz     r5, HOST_CR(r1)
        addi    r1, r1, HOST_STACK_SIZE
        mtlr    r4
        mtcr    r5
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr
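
/* Host-side entry point: set up a stack frame, save host state into it,
 * load the guest's nonvolatile state, and fall through to
 * lightweight_exit to enter the guest. */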
/* Registers:
 * r3: kvm_run pointer
 * r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu    r1, -HOST_STACK_SIZE(r1)
        stw     r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw     r3, HOST_RUN(r1)
        mflr    r3
        stw     r3, HOST_STACK_LR(r1)
        mfcr    r5
        stw     r5, HOST_CR(r1)

        /* Save host non-volatile register state to stack. */
        stw     r14, HOST_NV_GPR(R14)(r1)
        stw     r15, HOST_NV_GPR(R15)(r1)
        stw     r16, HOST_NV_GPR(R16)(r1)
        stw     r17, HOST_NV_GPR(R17)(r1)
        stw     r18, HOST_NV_GPR(R18)(r1)
        stw     r19, HOST_NV_GPR(R19)(r1)
        stw     r20, HOST_NV_GPR(R20)(r1)
        stw     r21, HOST_NV_GPR(R21)(r1)
        stw     r22, HOST_NV_GPR(R22)(r1)
        stw     r23, HOST_NV_GPR(R23)(r1)
        stw     r24, HOST_NV_GPR(R24)(r1)
        stw     r25, HOST_NV_GPR(R25)(r1)
        stw     r26, HOST_NV_GPR(R26)(r1)
        stw     r27, HOST_NV_GPR(R27)(r1)
        stw     r28, HOST_NV_GPR(R28)(r1)
        stw     r29, HOST_NV_GPR(R29)(r1)
        stw     r30, HOST_NV_GPR(R30)(r1)
        stw     r31, HOST_NV_GPR(R31)(r1)

        /* Load guest non-volatiles. */
        lwz     r14, VCPU_GPR(R14)(r4)
        lwz     r15, VCPU_GPR(R15)(r4)
        lwz     r16, VCPU_GPR(R16)(r4)
        lwz     r17, VCPU_GPR(R17)(r4)
        lwz     r18, VCPU_GPR(R18)(r4)
        lwz     r19, VCPU_GPR(R19)(r4)
        lwz     r20, VCPU_GPR(R20)(r4)
        lwz     r21, VCPU_GPR(R21)(r4)
        lwz     r22, VCPU_GPR(R22)(r4)
        lwz     r23, VCPU_GPR(R23)(r4)
        lwz     r24, VCPU_GPR(R24)(r4)
        lwz     r25, VCPU_GPR(R25)(r4)
        lwz     r26, VCPU_GPR(R26)(r4)
        lwz     r27, VCPU_GPR(R27)(r4)
        lwz     r28, VCPU_GPR(R28)(r4)
        lwz     r29, VCPU_GPR(R29)(r4)
        lwz     r30, VCPU_GPR(R30)(r4)
        lwz     r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
        /* Save host SPEFSCR and load guest SPEFSCR. */
        mfspr   r3, SPRN_SPEFSCR
        stw     r3, VCPU_HOST_SPEFSCR(r4)
        lwz     r3, VCPU_SPEFSCR(r4)
        mtspr   SPRN_SPEFSCR, r3
#endif
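
/* Reached by falling through from __kvmppc_vcpu_run above, and by a
 * branch from kvmppc_resume_host when the exit handler wants to re-enter
 * the guest without going back out to the host. */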
lightweight_exit:
        stw     r2, HOST_R2(r1)

        mfspr   r3, SPRN_PID
        stw     r3, VCPU_HOST_PID(r4)
        lwz     r3, VCPU_SHADOW_PID(r4)
        mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
        lwz     r3, VCPU_SHADOW_PID1(r4)
        mtspr   SPRN_PID1, r3
#endif

#ifdef CONFIG_44x
        iccci   0, 0 /* XXX hack */
#endif

        /* Load some guest volatiles. */
        lwz     r0, VCPU_GPR(R0)(r4)
        lwz     r2, VCPU_GPR(R2)(r4)
        lwz     r9, VCPU_GPR(R9)(r4)
        lwz     r10, VCPU_GPR(R10)(r4)
        lwz     r11, VCPU_GPR(R11)(r4)
        lwz     r12, VCPU_GPR(R12)(r4)
        lwz     r13, VCPU_GPR(R13)(r4)
        lwz     r3, VCPU_LR(r4)
        mtlr    r3
        lwz     r3, VCPU_XER(r4)
        mtxer   r3

        /* Switch the IVPR.  XXX If we take a TLB miss after this we're
         * screwed, so how do we make sure vcpu won't fault? */
        lis     r8, kvmppc_booke_handlers@ha
        lwz     r8, kvmppc_booke_handlers@l(r8)
        mtspr   SPRN_IVPR, r8

        lwz     r5, VCPU_SHARED(r4)

        /* Can't switch the stack pointer until after IVPR is switched,
         * because host interrupt handlers would get confused. */
        lwz     r1, VCPU_GPR(R1)(r4)

        /*
         * Host interrupt handlers may have clobbered these
         * guest-readable SPRGs, or the guest kernel may have
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
        PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr   SPRN_SPRG4W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr   SPRN_SPRG5W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr   SPRN_SPRG6W, r3
        PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr   SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
        /* Save enter time, with the same TBU/TBL/TBU dance as on exit. */
1:
        mfspr   r6, SPRN_TBRU
        mfspr   r7, SPRN_TBRL
        mfspr   r8, SPRN_TBRU
        cmpw    r8, r6
        bne     1b
        stw     r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
        stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /* Finish loading guest volatiles and jump to guest. */
        lwz     r3, VCPU_CTR(r4)
        lwz     r5, VCPU_CR(r4)
        lwz     r6, VCPU_PC(r4)
        lwz     r7, VCPU_SHADOW_MSR(r4)
        mtctr   r3
        mtcr    r5
        mtsrr0  r6
        mtsrr1  r7
        lwz     r5, VCPU_GPR(R5)(r4)
        lwz     r6, VCPU_GPR(R6)(r4)
        lwz     r7, VCPU_GPR(R7)(r4)
        lwz     r8, VCPU_GPR(R8)(r4)

        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint
         * intended for guest context could fire in the host instead. */
        lis     r3, 0xffff
        ori     r3, r3, 0xffff
        mtspr   SPRN_DBSR, r3

        lwz     r3, VCPU_GPR(R3)(r4)
        lwz     r4, VCPU_GPR(R4)(r4)
        rfi
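
/* Helpers called from C to save/restore guest SPE state: the 32 EVRs
 * plus the 64-bit accumulator.  r3 holds the vcpu pointer; both bail out
 * immediately if it is NULL. */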
#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
        cmpi    0, r3, 0
        beqlr-
        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
        evxor   evr6, evr6, evr6        /* zero the operands so that... */
        evmwumiaa evr6, evr6, evr6      /* ...ACC + 0*0 lands ACC in evr6 */
        li      r4, VCPU_ACC
        evstddx evr6, r4, r3            /* save acc */
        blr

_GLOBAL(kvmppc_load_guest_spe)
        cmpi    0, r3, 0
        beqlr-
        li      r4, VCPU_ACC
        evlddx  evr6, r4, r3
        evmra   evr6, evr6              /* load acc */
        REST_32EVRS(0, r4, r3, VCPU_EVR)
        blr
#endif