kvm_emul.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */
.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr
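
/*
 * Until the stub above is patched, "li r3, -1" leaves an error value
 * in r3 and the nops fall through to blr, so hypercalls on a host
 * that never patches this sequence simply return failure.
 */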

#define KVM_MAGIC_PAGE		(-4096)
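
/*
 * All accesses below go through base register 0: in a d-form
 * load/store, base "register" 0 reads as the literal value 0, so the
 * negative offset addresses the shared magic page mapped into the
 * top 4096 bytes of the guest's effective address space.
 */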

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
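
/*
 * The magic page fields are 64 bits wide. A 32-bit kernel only needs
 * the low word, and since the layout is big-endian that word sits at
 * offset + 4, hence the adjusted offset in the 32-bit variants.
 */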

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
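
/*
 * While shared->critical holds this context's r1, the hypervisor is
 * expected to defer interrupt injection, so the code between
 * SCRATCH_SAVE and SCRATCH_RESTORE can clobber r30, r31 and CR
 * without being interrupted halfway through.
 */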

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
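
/*
 * Emulation template for "mtmsrd rX, 1". The guest-side patching code
 * copies this block to a scratch area, rewrites the register operand
 * at kvm_emulate_mtmsrd_reg_offs and the branch target at
 * kvm_emulate_mtmsrd_branch_offs (see the exported words at the end
 * of this file), then replaces the trapping mtmsrd in the original
 * code with a branch into the copy.
 */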

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	andi.	r30, r0, (MSR_EE|MSR_RI)
	or	r31, r31, r30
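
	/*
	 * r0 above is only a placeholder: the patching code fills in
	 * the source register of the original instruction. Note that
	 * r30 and r31 were already clobbered by SCRATCH_SAVE, so this
	 * template only handles source registers other than those two.
	 */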

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor: tlbsync is privileged, so it traps out of the
	   guest and gives the hypervisor a chance to inject the
	   interrupt that has just become deliverable */
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller; the "b ." below is patched to branch to
	   the instruction following the original mtmsrd */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
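
/*
 * The three words above export, in units of 4-byte instructions, the
 * offsets of the patchable branch and register instructions and the
 * total length of the template, so the patching code can copy the
 * block and fix up the right words without hardcoded constants.
 */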