kvm_emul.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */
.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr
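
/*
 * The shared "magic" page lives at the top of the guest's effective
 * address space, so KVM_MAGIC_PAGE is the small negative constant -4096:
 * PowerPC load/store instructions treat a base register of 0 as a literal
 * zero, which lets every access below reach the page with nothing more
 * than a sign-extended 16-bit displacement.
 */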

#define KVM_MAGIC_PAGE	(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
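
/*
 * LL64/STL64 access 64-bit fields of the shared page. On 32-bit kernels
 * only the low word matters, and since the layout is big-endian that
 * word lives at offset +4.
 */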

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
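
/*
 * While shared->critical equals the guest's r1, the hypervisor defers
 * injecting interrupts, so the templates below can clobber and restore
 * r30, r31 and CR via the scratch slots without being preempted halfway.
 * Storing r2, which is never equal to the stack pointer in r1, is a
 * cheap way to leave the critical section again.
 */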

.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync
	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
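
/*
 * Each template exports its interesting positions as word offsets from
 * its start, plus a total length in words. The patching code in
 * arch/powerpc/kernel/kvm.c copies the template, rewrites the instruction
 * at *_reg_offs to use the register of the emulated instruction, drops
 * the original (trapping) instruction into the *_orig_ins_offs slot, and
 * turns the "b ." placeholder at *_branch_offs into a branch back to the
 * instruction following the patch site.
 */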

#define MSR_SAFE_BITS	(MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS	~MSR_SAFE_BITS
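
/*
 * A mtmsr that only touches the "safe" bits above can be completed in
 * the magic page without leaving the guest; changing any other bit has
 * side effects the host must see, so the real (trapping) mtmsr is run.
 */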

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0
	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
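
/*
 * wrteei 0 only ever clears MSR[EE], so it can never make a pending
 * interrupt deliverable; unlike the templates above, there is nothing
 * to re-check before returning to the caller.
 */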
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
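
/*
 * mtsrin: while address translation is enabled the new segment register
 * value must take effect immediately, so the original (trapping) mtsrin
 * is executed; with translation off the segment registers are not live,
 * so the value is only cached in the magic page's SR array.
 */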
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* r30 = (rB >> 28) * 4, the byte offset of the segment register */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end:
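
/*
 * kvm_template_start and kvm_template_end bracket all of the code above;
 * the patcher skips this region when scanning kernel text so it never
 * patches the templates themselves. Everything in between must remain
 * position-independent, as it is copied elsewhere before it ever runs.
 */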