kvm_emul.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */
.global kvm_hypercall_start
kvm_hypercall_start:
        li      r3, -1
        nop
        nop
        nop
        blr
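
/*
 * Presumably patched at boot: the guest is expected to copy the
 * hypercall sequence the host advertises via the device tree (the
 * "hcall-instructions" property) over the li/nop slots above. Until
 * that happens, the stub simply returns -1 in r3 to signal that no
 * hypervisor is available.
 */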

#define KVM_MAGIC_PAGE  (-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld  reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw reg, (offs + 4)(reg2)
#endif
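
/*
 * The magic page fields are 64 bits wide. On 32-bit (big-endian)
 * kernels only the low word is of interest, and it lives at offset +4,
 * hence the adjusted offsets in the 32-bit variants above.
 */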

#define SCRATCH_SAVE \
        /* Enable critical section. We are critical if \
           shared->critical == r1 */ \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \
        \
        /* Save state */ \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
        mfcr    r31; \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE \
        /* Restore state */ \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \
        mtcr    r30; \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
        \
        /* Disable critical section. We are critical if \
           shared->critical == r1 and r2 is always != r1 */ \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
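
/*
 * While shared->critical equals the guest's r1 the host defers
 * interrupt injection, so the scratch fields above cannot be clobbered
 * mid-sequence. Storing r2 (the stack pointer in r1 is never equal to
 * r2) is what ends the critical section.
 */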

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

        /* OR the register's (MSR_EE|MSR_RI) bits onto the MSR */
kvm_emulate_mtmsrd_reg:
        andi.   r30, r0, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put the MSR back into the magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor */
        tlbsync

        b       kvm_emulate_mtmsrd_branch
no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
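
/*
 * Patching convention: for each template the *_offs symbols give the
 * word index of an instruction the in-guest patching code has to fix
 * up after copying the template (the branch back to the caller, the
 * source register of the emulated instruction, ...), and *_len gives
 * the template's length in words.
 */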

#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
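
/*
 * Flipping only the "safe" bits can be handled entirely through the
 * shadow MSR in the magic page; a change to any other ("critical")
 * bit falls back to a real, trapping mtmsr below.
 */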

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR into r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between the old and new MSR */
kvm_emulate_mtmsr_reg1:
        xor     r31, r0, r31

        /* Check if we need to really do mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
kvm_emulate_mtmsr_reg2:
        andi.   r31, r0, MSR_EE
        beq     no_mtmsr

        b       do_mtmsr

no_mtmsr:

        /* Put the MSR into the magic page because we don't call mtmsr */
kvm_emulate_mtmsr_reg3:
        STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg3_offs
kvm_emulate_mtmsr_reg3_offs:
        .long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

.global kvm_emulate_wrteei
kvm_emulate_wrteei:

        SCRATCH_SAVE

        /* Fetch old MSR into r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        li      r30, 0
        ori     r30, r30, MSR_EE
        andc    r31, r31, r30

        /* OR new MSR_EE onto the old MSR */
kvm_emulate_wrteei_ee:
        ori     r31, r31, 0
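        /*
         * The immediate of the ori above is fixed up at the exported
         * _ee offset: it presumably becomes MSR_EE for wrteei 1 and
         * stays 0 for wrteei 0.
         */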

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_branch:
        b       .
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
        .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
        .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
        .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
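
/*
 * mtsrin: if translation (MSR_DR or MSR_IR) is enabled in the shadow
 * MSR, the segment register update has to take effect immediately, so
 * the original (trapping) instruction is run; otherwise it is enough
 * to record the new value in the magic page's SR shadow.
 */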
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

        SCRATCH_SAVE

        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* Byte offset of the SR shadow slot: (rX >> 28) * 4 */
        rlwinm  r30, r0, 6, 26, 29
kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4