kvm_para.h

#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>
#include <asm/hyperv.h>

/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
 * should be used to determine that a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE 0x40000000

/* This CPUID returns a feature bitmap in eax. Before enabling a particular
 * paravirtualization, the appropriate feature bit should be checked.
 */
#define KVM_CPUID_FEATURES 0x40000001
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock msrs
 * are available. The use of 0x11 and 0x12 is deprecated
 */
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4

/* The last 8 bits are used to indicate how to interpret the flags field
 * in pvclock structure. If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24

#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02

#define KVM_MAX_MMU_OP_BATCH 32

#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3

/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
        __u32 op;
        __u32 pad;
};

struct kvm_mmu_op_write_pte {
        struct kvm_mmu_op_header header;
        __u64 pte_phys;
        __u64 pte_val;
};

struct kvm_mmu_op_flush_tlb {
        struct kvm_mmu_op_header header;
};

struct kvm_mmu_op_release_pt {
        struct kvm_mmu_op_header header;
        __u64 pt_phys;
};

#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2

struct kvm_vcpu_pv_apf_data {
        __u32 reason;
        __u8 pad[60];
        __u32 enabled;
};
#ifdef __KERNEL__
#include <asm/processor.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
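
/*
 * Illustrative sketch only (not part of this header): how a guest might
 * register a per-cpu pvclock area through the new kvmclock MSR defined
 * above. Assumes struct pvclock_vcpu_time_info from <asm/pvclock-abi.h>,
 * wrmsrl() from <asm/msr.h> and __pa() from <asm/page.h> (both pulled in
 * via <asm/processor.h>), and that bit 0 of the MSR value acts as the
 * enable bit. The real registration lives in kvm_register_clock() in
 * arch/x86/kernel/kvmclock.c.
 */
struct pvclock_vcpu_time_info;

static inline void example_register_kvmclock(struct pvclock_vcpu_time_info *ti)
{
        /* Bit 0 enables the clock; the upper bits carry the guest-physical address. */
        wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(ti) | 1);
}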
/* This instruction is vmcall. On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction. The hypervisor may replace it with something else but
 * only the instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
                                  unsigned long p2)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3,
                                  unsigned long p4)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
                     : "memory");
        return ret;
}
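
/*
 * Illustrative sketch only (not part of this header): how the hypercall
 * helpers above combine with the KVM_HC_MMU_OP payload structures defined
 * earlier. Assumes KVM_HC_MMU_OP from <linux/kvm_para.h> and __pa() from
 * <asm/page.h>. The in-tree user in arch/x86/kernel/kvm.c batches ops and
 * loops until the hypervisor has consumed the whole buffer; this one-shot
 * call is a simplification.
 */
static inline void example_mmu_write_pte(u64 pte_phys, u64 pte_val)
{
        struct kvm_mmu_op_write_pte wpte = {
                .header = { .op = KVM_MMU_OP_WRITE_PTE },
                .pte_phys = pte_phys,
                .pte_val = pte_val,
        };

        /* args: buffer length, guest-physical address of the buffer (low, high) */
        kvm_hypercall3(KVM_HC_MMU_OP, sizeof(wpte), __pa(&wpte), 0);
}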
static inline int kvm_para_available(void)
{
        unsigned int eax, ebx, ecx, edx;
        char signature[13];

        cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
        memcpy(signature + 0, &ebx, 4);
        memcpy(signature + 4, &ecx, 4);
        memcpy(signature + 8, &edx, 4);
        signature[12] = 0;
        if (strcmp(signature, "KVMKVMKVM") == 0)
                return 1;

        return 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(KVM_CPUID_FEATURES);
}
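
/*
 * Illustrative sketch only (not part of this header): a guest normally
 * combines the two helpers above to probe for a feature before relying on
 * it, e.g. preferring the new kvmclock MSRs when KVM_FEATURE_CLOCKSOURCE2
 * is advertised. (The generic kvm_para_has_feature() wrapper lives in
 * <linux/kvm_para.h>.)
 */
static inline int example_has_new_kvmclock(void)
{
        return kvm_para_available() &&
               (kvm_arch_para_features() & (1 << KVM_FEATURE_CLOCKSOURCE2));
}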
#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
static inline u32 kvm_read_and_reset_pf_reason(void)
{
        return 0;
}
#endif
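
/*
 * Illustrative sketch only (not part of this header): how a guest might
 * enable async page faults by registering a kvm_vcpu_pv_apf_data area
 * through MSR_KVM_ASYNC_PF_EN. Assumes wrmsrl() from <asm/msr.h> and
 * __pa() from <asm/page.h>; the real per-cpu setup is done in
 * arch/x86/kernel/kvm.c.
 */
static inline void example_enable_async_pf(struct kvm_vcpu_pv_apf_data *apf)
{
        u64 pa = __pa(apf);

        /* Low bits carry control flags, the rest the guest-physical address. */
        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
}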

#endif /* __KERNEL__ */

#endif /* _ASM_X86_KVM_PARA_H */