book3s_hv_rmhandlers.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

#define SHADOW_VCPU_OFF	PACA_KVM_SVCPU
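
/*
 * The two stubs below advance SRR0 (or HSRR0) by one instruction (4 bytes)
 * and return with rfid/hrfid, i.e. they resume execution at the instruction
 * after the one that caused the interrupt.
 */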
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
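	/*
	 * r0 = current MSR with RI cleared, r6 = current MSR with IR/DR
	 * cleared.  After RI is dropped below, the RFI uses SRR0/SRR1 to
	 * jump to kvmppc_hv_entry with translation off, i.e. in real mode.
	 */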
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                                Entry code                                  *
 *                                                                            *
 *****************************************************************************/

	.global	kvmppc_hv_entry
kvmppc_hv_entry:
	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Clear out SLB */
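	/* slbia does not touch SLB entry 0, so write a zero entry 0 first */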
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/* Switch to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	ld	r8,VCPU_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
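	/*
	 * If fewer than about 10 timebase ticks remain, the guest would take
	 * a hypervisor decrementer exit almost immediately, so treat it as
	 * an HDEC exit now (hdec_soon) instead of entering the guest.
	 */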
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
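	/*
	 * Flush the TLB for this partition: the loop below issues tlbiel
	 * 128 times, stepping the set-index field in r7 by 0x1000 each
	 * iteration, so every congruence class (128 sets, as assumed by
	 * the loop count) gets invalidated.
	 */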
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
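	/*
	 * Clear MSR_HV without disturbing the other bits: rotate so the HV
	 * bit becomes the most-significant bit, mask it off with rldicl,
	 * then rotate back.  MSR_ME is forced on so machine check
	 * interrupts stay enabled while in the guest.
	 */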
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */
	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)
	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:

	/* See if this is something we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
hcall_real_cont:

	/* Check for mediated interrupts (could be done earlier really ...) */
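	/*
	 * If the exit was an external interrupt, the guest has MSR_EE set
	 * and LPCR_MER is set in the vcpu's LPCR, deliver the external
	 * interrupt directly to the guest (bounce_ext_interrupt) rather
	 * than exiting to the host.
	 */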
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_LPCR(r9)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
	/* Save DEC */
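	/*
	 * Convert the remaining (signed) DEC value into an absolute
	 * timebase expiry, so it can be turned back into a relative DEC
	 * on the next guest entry (see the mftb/subf in the entry path).
	 */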
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)

	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
	/* Switch back to host partition */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
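	/*
	 * Only the bolted entries are restored here, from the SLB shadow
	 * buffer; anything else the host needs will be faulted back in.
	 */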
	ld	r8,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
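/*
 * hcall numbers are multiples of 4, so the hcall number itself (low two
 * bits cleared) is used as a byte index into hcall_real_table, which holds
 * 32-bit offsets from the start of the table to each real-mode handler.
 * A zero entry means there is no real-mode handler for that hcall.
 */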
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
/* We've attempted a real mode hcall, but the handler has punted it
 * back to userspace.  We need to restore some clobbered volatiles
 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)

	b	hcall_real_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	0		/* 0x20 - H_SET_TCE */
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	0		/* 0xe0 */
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	mtspr	SPRN_DABR,r4
	li	r3,0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
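	/*
	 * Enable FP (plus VMX/VSX where the CPU has them) in the MSR so the
	 * register file can be accessed; the original MSR saved in r9 is
	 * restored before returning.
	 */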
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	stxvd2x	reg,r6,r3
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	lxvd2x	reg,r7,r4
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr