#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>
#include <asm/hyperv.h>
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
 * should be used to determine that a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE 0x40000000

/* This CPUID returns a feature bitmap in eax.  Before enabling a particular
 * paravirtualization, the appropriate feature bit should be checked.
 */
#define KVM_CPUID_FEATURES 0x40000001
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock MSRs is available.
 * The use of 0x11 and 0x12 is deprecated.
 */
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4

/* The last 8 bits are used to indicate how to interpret the flags field
 * in the pvclock structure.  If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24

#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02

#define KVM_MAX_MMU_OP_BATCH 32

#define KVM_ASYNC_PF_ENABLED (1 << 0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3

/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
        __u32 op;
        __u32 pad;
};

struct kvm_mmu_op_write_pte {
        struct kvm_mmu_op_header header;
        __u64 pte_phys;
        __u64 pte_val;
};

struct kvm_mmu_op_flush_tlb {
        struct kvm_mmu_op_header header;
};

struct kvm_mmu_op_release_pt {
        struct kvm_mmu_op_header header;
        __u64 pt_phys;
};

#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2

struct kvm_vcpu_pv_apf_data {
        __u32 reason;
        __u8 pad[60];
        __u32 enabled;
};
#ifdef __KERNEL__
#include <asm/processor.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
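
/*
 * Illustrative sketch, not part of the original header: how a guest could
 * register a per-vcpu pvclock area with the hypervisor.  The guest writes
 * the guest physical address of its pvclock time structure, with bit 0 set
 * as the enable flag, to MSR_KVM_SYSTEM_TIME_NEW, falling back to the
 * legacy MSR_KVM_SYSTEM_TIME when KVM_FEATURE_CLOCKSOURCE2 is not
 * advertised.  The helper name is hypothetical, and wrmsrl() (from
 * <asm/msr.h>, pulled in via <asm/processor.h>) is assumed to be available
 * here; the real registration code lives in kvmclock.c.
 */
static inline void example_register_pvclock(u64 pvclock_gpa, bool new_msrs)
{
        u32 msr = new_msrs ? MSR_KVM_SYSTEM_TIME_NEW : MSR_KVM_SYSTEM_TIME;

        /* Bit 0 enables the clock; the rest is the guest physical address. */
        wrmsrl(msr, pvclock_gpa | 1);
}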
/* This instruction is vmcall.  On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* KVM hypercalls are issued with a three-byte sequence: either the vmcall
 * or the vmmcall instruction.  The hypervisor may replace it with something
 * else, but only these instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
                                  unsigned long p2)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3,
                                  unsigned long p4)
{
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
                     : "memory");
        return ret;
}
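
/*
 * Illustrative sketch, not part of the original header: submitting a single
 * KVM_MMU_OP_WRITE_PTE operation through the MMU op hypercall using the
 * payload structures defined above.  The hypercall number (KVM_HC_MMU_OP)
 * lives in <linux/kvm_para.h>, and the argument order shown here (buffer
 * length, then the low and high halves of the buffer's guest physical
 * address) is an assumption based on the old guest-side helper.  The caller
 * is assumed to pass in the buffer's physical address so the sketch stays
 * self-contained.
 */
static inline long example_mmu_write_pte(struct kvm_mmu_op_write_pte *wpte,
                                         unsigned long wpte_phys_lo,
                                         unsigned long wpte_phys_hi,
                                         __u64 pte_phys, __u64 pte_val)
{
        wpte->header.op = KVM_MMU_OP_WRITE_PTE;
        wpte->header.pad = 0;
        wpte->pte_phys = pte_phys;
        wpte->pte_val = pte_val;

        /* returns how many bytes of the buffer the hypervisor consumed */
        return kvm_hypercall3(2 /* KVM_HC_MMU_OP, assumed value */,
                              sizeof(*wpte), wpte_phys_lo, wpte_phys_hi);
}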
static inline int kvm_para_available(void)
{
        unsigned int eax, ebx, ecx, edx;
        char signature[13];

        cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
        memcpy(signature + 0, &ebx, 4);
        memcpy(signature + 4, &ecx, 4);
        memcpy(signature + 8, &edx, 4);
        signature[12] = 0;
        if (strcmp(signature, "KVMKVMKVM") == 0)
                return 1;

        return 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(KVM_CPUID_FEATURES);
}
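
/*
 * Illustrative sketch, not part of the original header: checking a
 * KVM_FEATURE_* bit from the KVM_CPUID_FEATURES bitmap before enabling the
 * corresponding paravirtual feature, as the comment at the top of this file
 * asks for.  The kernel's generic kvm_para.h provides an equivalent helper;
 * the name used here is hypothetical.
 */
static inline int example_kvm_feature_present(unsigned int feature)
{
        if (!kvm_para_available())
                return 0;
        return !!(kvm_arch_para_features() & (1UL << feature));
}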
#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
static inline u32 kvm_read_and_reset_pf_reason(void)
{
        return 0;
}
#endif
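
/*
 * Illustrative sketch, not part of the original header: enabling async page
 * faults for the current vcpu.  The guest hands the hypervisor the guest
 * physical address of a suitably aligned struct kvm_vcpu_pv_apf_data with
 * KVM_ASYNC_PF_ENABLED set in the low bits; the hypervisor then reports
 * KVM_PV_REASON_PAGE_NOT_PRESENT or KVM_PV_REASON_PAGE_READY through the
 * 'reason' field.  The helper name and the direct wrmsrl() call are
 * assumptions for illustration; the feature should first be checked via the
 * KVM_FEATURE_ASYNC_PF bit.
 */
static inline void example_enable_async_pf(u64 apf_data_gpa)
{
        /* bit 0 (KVM_ASYNC_PF_ENABLED) switches the mechanism on */
        wrmsrl(MSR_KVM_ASYNC_PF_EN, apf_data_gpa | KVM_ASYNC_PF_ENABLED);
}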
#endif /* __KERNEL__ */

#endif /* _ASM_X86_KVM_PARA_H */