/* arch/powerpc/kvm/book3s_64_slb.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
  19. #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
  20. #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
  21. #define UNBOLT_SLB_ENTRY(num) \
  22. ld r9, SHADOW_SLB_ESID(num)(r12); \
  23. /* Invalid? Skip. */; \
  24. rldicl. r0, r9, 37, 63; \
  25. beq slb_entry_skip_ ## num; \
  26. xoris r9, r9, SLB_ESID_V@h; \
  27. std r9, SHADOW_SLB_ESID(num)(r12); \
  28. slb_entry_skip_ ## num:
  29. #define REBOLT_SLB_ENTRY(num) \
  30. ld r10, SHADOW_SLB_ESID(num)(r11); \
  31. cmpdi r10, 0; \
  32. beq slb_exit_skip_1; \
  33. oris r10, r10, SLB_ESID_V@h; \
  34. ld r9, SHADOW_SLB_VSID(num)(r11); \
  35. slbmte r9, r10; \
  36. std r10, SHADOW_SLB_ESID(num)(r11); \
  37. slb_exit_skip_ ## num:
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
  43. .global kvmppc_handler_trampoline_enter
  44. kvmppc_handler_trampoline_enter:
  45. /* Required state:
  46. *
  47. * MSR = ~IR|DR
  48. * R13 = PACA
  49. * R9 = guest IP
  50. * R10 = guest MSR
  51. * R11 = free
  52. * R12 = free
  53. * PACA[PACA_EXMC + EX_R9] = guest R9
  54. * PACA[PACA_EXMC + EX_R10] = guest R10
  55. * PACA[PACA_EXMC + EX_R11] = guest R11
  56. * PACA[PACA_EXMC + EX_R12] = guest R12
  57. * PACA[PACA_EXMC + EX_R13] = guest R13
  58. * PACA[PACA_EXMC + EX_CCR] = guest CR
  59. * PACA[PACA_EXMC + EX_R3] = guest XER
  60. */
  61. mtsrr0 r9
  62. mtsrr1 r10
  63. mtspr SPRN_SPRG_SCRATCH0, r0
  64. /* Remove LPAR shadow entries */
  65. #if SLB_NUM_BOLTED == 3
  66. ld r12, PACA_SLBSHADOWPTR(r13)
  67. /* Save off the first entry so we can slbie it later */
  68. ld r10, SHADOW_SLB_ESID(0)(r12)
  69. ld r11, SHADOW_SLB_VSID(0)(r12)
  70. /* Remove bolted entries */
  71. UNBOLT_SLB_ENTRY(0)
  72. UNBOLT_SLB_ENTRY(1)
  73. UNBOLT_SLB_ENTRY(2)
  74. #else
  75. #error unknown number of bolted entries
  76. #endif
  77. /* Flush SLB */
  78. slbia
  79. /* r0 = esid & ESID_MASK */
  80. rldicr r10, r10, 0, 35
  81. /* r0 |= CLASS_BIT(VSID) */
  82. rldic r12, r11, 56 - 36, 36
  83. or r10, r10, r12
  84. slbie r10
  85. isync
  86. /* Fill SLB with our shadow */
  87. lbz r12, PACA_KVM_SLB_MAX(r13)
  88. mulli r12, r12, 16
  89. addi r12, r12, PACA_KVM_SLB
  90. add r12, r12, r13
  91. /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
  92. li r11, PACA_KVM_SLB
  93. add r11, r11, r13
  94. slb_loop_enter:
  95. ld r10, 0(r11)
  96. rldicl. r0, r10, 37, 63
  97. beq slb_loop_enter_skip
  98. ld r9, 8(r11)
  99. slbmte r9, r10
  100. slb_loop_enter_skip:
  101. addi r11, r11, 16
  102. cmpd cr0, r11, r12
  103. blt slb_loop_enter
  104. slb_do_enter:
  105. /* Enter guest */
  106. mfspr r0, SPRN_SPRG_SCRATCH0
  107. ld r9, (PACA_EXMC+EX_R9)(r13)
  108. ld r10, (PACA_EXMC+EX_R10)(r13)
  109. ld r12, (PACA_EXMC+EX_R12)(r13)
  110. lwz r11, (PACA_EXMC+EX_CCR)(r13)
  111. mtcr r11
  112. ld r11, (PACA_EXMC+EX_R3)(r13)
  113. mtxer r11
  114. ld r11, (PACA_EXMC+EX_R11)(r13)
  115. ld r13, (PACA_EXMC+EX_R13)(r13)
  116. RFI
  117. kvmppc_handler_trampoline_enter_end:
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
  123. .global kvmppc_handler_trampoline_exit
  124. kvmppc_handler_trampoline_exit:
  125. /* Register usage at this point:
  126. *
  127. * SPRG_SCRATCH0 = guest R13
  128. * R01 = host R1
  129. * R02 = host R2
  130. * R10 = guest PC
  131. * R11 = guest MSR
  132. * R12 = exit handler id
  133. * R13 = PACA
  134. * PACA.exmc.CCR = guest CR
  135. * PACA.exmc.R9 = guest R1
  136. * PACA.exmc.R10 = guest R10
  137. * PACA.exmc.R11 = guest R11
  138. * PACA.exmc.R12 = guest R12
  139. * PACA.exmc.R13 = guest R2
  140. *
  141. */
  142. /* Save registers */
  143. std r0, (PACA_EXMC+EX_SRR0)(r13)
  144. std r9, (PACA_EXMC+EX_R3)(r13)
  145. std r10, (PACA_EXMC+EX_LR)(r13)
  146. std r11, (PACA_EXMC+EX_DAR)(r13)
  147. /*
  148. * In order for us to easily get the last instruction,
  149. * we got the #vmexit at, we exploit the fact that the
  150. * virtual layout is still the same here, so we can just
  151. * ld from the guest's PC address
  152. */
  153. /* We only load the last instruction when it's safe */
  154. cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
  155. beq ld_last_inst
  156. cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
  157. beq ld_last_inst
  158. b no_ld_last_inst
  159. ld_last_inst:
  160. /* Save off the guest instruction we're at */
  161. /* 1) enable paging for data */
  162. mfmsr r9
  163. ori r11, r9, MSR_DR /* Enable paging for data */
  164. mtmsr r11
  165. /* 2) fetch the instruction */
  166. lwz r0, 0(r10)
  167. /* 3) disable paging again */
  168. mtmsr r9
  169. no_ld_last_inst:
  170. /* Restore bolted entries from the shadow and fix it along the way */
  171. /* We don't store anything in entry 0, so we don't need to take care of it */
  172. slbia
  173. isync
  174. #if SLB_NUM_BOLTED == 3
  175. ld r11, PACA_SLBSHADOWPTR(r13)
  176. REBOLT_SLB_ENTRY(0)
  177. REBOLT_SLB_ENTRY(1)
  178. REBOLT_SLB_ENTRY(2)
  179. #else
  180. #error unknown number of bolted entries
  181. #endif
  182. slb_do_exit:
  183. /* Restore registers */
  184. ld r11, (PACA_EXMC+EX_DAR)(r13)
  185. ld r10, (PACA_EXMC+EX_LR)(r13)
  186. ld r9, (PACA_EXMC+EX_R3)(r13)
  187. /* Save last inst */
  188. stw r0, (PACA_EXMC+EX_LR)(r13)
  189. /* Save DAR and DSISR before going to paged mode */
  190. mfdar r0
  191. std r0, (PACA_EXMC+EX_DAR)(r13)
  192. mfdsisr r0
  193. stw r0, (PACA_EXMC+EX_DSISR)(r13)
  194. /* RFI into the highmem handler */
  195. mfmsr r0
  196. ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
  197. mtsrr1 r0
  198. ld r0, PACASAVEDMSR(r13) /* Highmem handler address */
  199. mtsrr0 r0
  200. mfspr r0, SPRN_SPRG_SCRATCH0
  201. RFI
  202. kvmppc_handler_trampoline_exit_end: