/*
 * arch/x86/include/asm/kvm_para.h
 *
 * KVM guest <-> host paravirtualization interface for x86:
 * CPUID leaves, feature bits, custom MSRs, shared-memory structures,
 * and the hypercall entry helpers.
 */
  1. #ifndef _ASM_X86_KVM_PARA_H
  2. #define _ASM_X86_KVM_PARA_H
  3. #include <linux/types.h>
  4. #include <asm/hyperv.h>
  5. /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
  6. * should be used to determine that a VM is running under KVM.
  7. */
  8. #define KVM_CPUID_SIGNATURE 0x40000000
  9. /* This CPUID returns a feature bitmap in eax. Before enabling a particular
  10. * paravirtualization, the appropriate feature bit should be checked.
  11. */
  12. #define KVM_CPUID_FEATURES 0x40000001
  13. #define KVM_FEATURE_CLOCKSOURCE 0
  14. #define KVM_FEATURE_NOP_IO_DELAY 1
  15. #define KVM_FEATURE_MMU_OP 2
  16. /* This indicates that the new set of kvmclock msrs
  17. * are available. The use of 0x11 and 0x12 is deprecated
  18. */
  19. #define KVM_FEATURE_CLOCKSOURCE2 3
  20. #define KVM_FEATURE_ASYNC_PF 4
  21. #define KVM_FEATURE_STEAL_TIME 5
  22. #define KVM_FEATURE_PV_EOI 6
  23. /* The last 8 bits are used to indicate how to interpret the flags field
  24. * in pvclock structure. If no bits are set, all flags are ignored.
  25. */
  26. #define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
  27. #define MSR_KVM_WALL_CLOCK 0x11
  28. #define MSR_KVM_SYSTEM_TIME 0x12
  29. #define KVM_MSR_ENABLED 1
  30. /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
  31. #define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
  32. #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
  33. #define MSR_KVM_ASYNC_PF_EN 0x4b564d02
  34. #define MSR_KVM_STEAL_TIME 0x4b564d03
  35. #define MSR_KVM_PV_EOI_EN 0x4b564d04
  36. struct kvm_steal_time {
  37. __u64 steal;
  38. __u32 version;
  39. __u32 flags;
  40. __u32 pad[12];
  41. };
  42. #define KVM_STEAL_ALIGNMENT_BITS 5
  43. #define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
  44. #define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
  45. #define KVM_MAX_MMU_OP_BATCH 32
  46. #define KVM_ASYNC_PF_ENABLED (1 << 0)
  47. #define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
  48. /* Operations for KVM_HC_MMU_OP */
  49. #define KVM_MMU_OP_WRITE_PTE 1
  50. #define KVM_MMU_OP_FLUSH_TLB 2
  51. #define KVM_MMU_OP_RELEASE_PT 3
  52. /* Payload for KVM_HC_MMU_OP */
  53. struct kvm_mmu_op_header {
  54. __u32 op;
  55. __u32 pad;
  56. };
  57. struct kvm_mmu_op_write_pte {
  58. struct kvm_mmu_op_header header;
  59. __u64 pte_phys;
  60. __u64 pte_val;
  61. };
  62. struct kvm_mmu_op_flush_tlb {
  63. struct kvm_mmu_op_header header;
  64. };
  65. struct kvm_mmu_op_release_pt {
  66. struct kvm_mmu_op_header header;
  67. __u64 pt_phys;
  68. };
  69. #define KVM_PV_REASON_PAGE_NOT_PRESENT 1
  70. #define KVM_PV_REASON_PAGE_READY 2
  71. struct kvm_vcpu_pv_apf_data {
  72. __u32 reason;
  73. __u8 pad[60];
  74. __u32 enabled;
  75. };
  76. #define KVM_PV_EOI_BIT 0
  77. #define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
  78. #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
  79. #define KVM_PV_EOI_DISABLED 0x0
  80. #ifdef __KERNEL__
  81. #include <asm/processor.h>
  82. extern void kvmclock_init(void);
  83. extern int kvm_register_clock(char *txt);
  84. #ifdef CONFIG_KVM_CLOCK
  85. bool kvm_check_and_clear_guest_paused(void);
  86. #else
  87. static inline bool kvm_check_and_clear_guest_paused(void)
  88. {
  89. return false;
  90. }
  91. #endif /* CONFIG_KVMCLOCK */
  92. /* This instruction is vmcall. On non-VT architectures, it will generate a
  93. * trap that we will then rewrite to the appropriate instruction.
  94. */
  95. #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
  96. /* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun
  97. * instruction. The hypervisor may replace it with something else but only the
  98. * instructions are guaranteed to be supported.
  99. *
  100. * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
  101. * The hypercall number should be placed in rax and the return value will be
  102. * placed in rax. No other registers will be clobbered unless explicited
  103. * noted by the particular hypercall.
  104. */
  105. static inline long kvm_hypercall0(unsigned int nr)
  106. {
  107. long ret;
  108. asm volatile(KVM_HYPERCALL
  109. : "=a"(ret)
  110. : "a"(nr)
  111. : "memory");
  112. return ret;
  113. }
  114. static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
  115. {
  116. long ret;
  117. asm volatile(KVM_HYPERCALL
  118. : "=a"(ret)
  119. : "a"(nr), "b"(p1)
  120. : "memory");
  121. return ret;
  122. }
  123. static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
  124. unsigned long p2)
  125. {
  126. long ret;
  127. asm volatile(KVM_HYPERCALL
  128. : "=a"(ret)
  129. : "a"(nr), "b"(p1), "c"(p2)
  130. : "memory");
  131. return ret;
  132. }
  133. static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
  134. unsigned long p2, unsigned long p3)
  135. {
  136. long ret;
  137. asm volatile(KVM_HYPERCALL
  138. : "=a"(ret)
  139. : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
  140. : "memory");
  141. return ret;
  142. }
  143. static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
  144. unsigned long p2, unsigned long p3,
  145. unsigned long p4)
  146. {
  147. long ret;
  148. asm volatile(KVM_HYPERCALL
  149. : "=a"(ret)
  150. : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
  151. : "memory");
  152. return ret;
  153. }
  154. static inline int kvm_para_available(void)
  155. {
  156. unsigned int eax, ebx, ecx, edx;
  157. char signature[13];
  158. if (boot_cpu_data.cpuid_level < 0)
  159. return 0; /* So we don't blow up on old processors */
  160. if (cpu_has_hypervisor) {
  161. cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
  162. memcpy(signature + 0, &ebx, 4);
  163. memcpy(signature + 4, &ecx, 4);
  164. memcpy(signature + 8, &edx, 4);
  165. signature[12] = 0;
  166. if (strcmp(signature, "KVMKVMKVM") == 0)
  167. return 1;
  168. }
  169. return 0;
  170. }
  171. static inline unsigned int kvm_arch_para_features(void)
  172. {
  173. return cpuid_eax(KVM_CPUID_FEATURES);
  174. }
  175. #ifdef CONFIG_KVM_GUEST
  176. void __init kvm_guest_init(void);
  177. void kvm_async_pf_task_wait(u32 token);
  178. void kvm_async_pf_task_wake(u32 token);
  179. u32 kvm_read_and_reset_pf_reason(void);
  180. extern void kvm_disable_steal_time(void);
  181. #else
  182. #define kvm_guest_init() do { } while (0)
  183. #define kvm_async_pf_task_wait(T) do {} while(0)
  184. #define kvm_async_pf_task_wake(T) do {} while(0)
  185. static inline u32 kvm_read_and_reset_pf_reason(void)
  186. {
  187. return 0;
  188. }
  189. static inline void kvm_disable_steal_time(void)
  190. {
  191. return;
  192. }
  193. #endif
  194. #endif /* __KERNEL__ */
  195. #endif /* _ASM_X86_KVM_PARA_H */