msr_64.h

#ifndef X86_64_MSR_H
#define X86_64_MSR_H 1

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
#include <linux/errno.h>

/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */
#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (val1), "=d" (val2) \
			     : "c" (msr))

#define rdmsrl(msr,val) do { unsigned long a__,b__; \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (a__), "=d" (b__) \
			     : "c" (msr)); \
	val = a__ | (b__<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__("wrmsr" \
			     : /* no outputs */ \
			     : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
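
/*
 * Example use of the accessors above (an illustrative sketch, not part of
 * the original header).  MSR_EFER and EFER_NX are assumed to come from
 * <asm/msr-index.h>; any MSR constant from that header works the same way:
 *
 *	unsigned long efer;
 *	unsigned int lo, hi;
 *
 *	rdmsrl(MSR_EFER, efer);			(full 64-bit read)
 *	rdmsr(MSR_EFER, lo, hi);		(same MSR, split edx:eax)
 *	wrmsrl(MSR_EFER, efer | EFER_NX);	(full 64-bit write)
 */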
/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__; \
	asm volatile("2: wrmsr ; xorl %0,%0\n" \
		     "1:\n\t" \
		     ".section .fixup,\"ax\"\n\t" \
		     "3: movl %4,%0 ; jmp 1b\n\t" \
		     ".previous\n\t" \
		     ".section __ex_table,\"a\"\n" \
		     " .align 8\n\t" \
		     " .quad 2b,3b\n\t" \
		     ".previous" \
		     : "=a" (ret__) \
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
#define rdmsr_safe(msr,a,b) \
	({ int ret__; \
	   asm volatile ("1: rdmsr\n" \
			 "2:\n" \
			 ".section .fixup,\"ax\"\n" \
			 "3: movl %4,%0\n" \
			 " jmp 2b\n" \
			 ".previous\n" \
			 ".section __ex_table,\"a\"\n" \
			 " .align 8\n" \
			 " .quad 1b,3b\n" \
			 ".previous" \
			 : "=&bDS" (ret__), "=a" (*(a)), "=d" (*(b)) \
			 : "c" (msr), "i" (-EIO), "0" (0)); \
	   ret__; })
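
/*
 * The *_safe variants above return 0 on success and a negative errno
 * (-EFAULT for writes, -EIO for reads) when the MSR access faults, instead
 * of taking an unhandled #GP.  A sketch of typical use (not from the
 * original header; MSR_IA32_PLATFORM_ID is assumed from <asm/msr-index.h>):
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *		return;				(MSR not implemented here)
 *	if (checking_wrmsrl(msr, val))
 *		printk("MSR write failed\n");
 */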
#define rdtsc(low,high) \
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

#define rdtscp(low,high,aux) \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define rdtscpll(val, aux) do { \
	unsigned long __a, __d; \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
	(val) = (__d << 32) | __a; \
} while (0)
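
/*
 * The ".byte 0x0f,0x01,0xf9" sequences above are the opcode of the rdtscp
 * instruction, spelled out for assemblers that do not know the mnemonic.
 * Illustrative use (a sketch, not part of the original header):
 *
 *	unsigned long tsc;
 *	unsigned int aux;
 *
 *	rdtscll(tsc);		(64-bit time stamp counter)
 *	rdtscpll(tsc, aux);	(TSC plus this CPU's TSC_AUX value)
 *
 * The two wrmsr wrappers below program MSR 0x10 (the time stamp counter
 * itself) and MSR 0xc0000103 (TSC_AUX, the value rdtscp returns in ecx).
 */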
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
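
/*
 * rdpmc reads performance-monitoring counter 'counter' into edx:eax.  Note
 * that outside ring 0 the instruction faults unless CR4.PCE is set.
 */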
#define rdpmc(counter,low,high) \
	__asm__ __volatile__("rdpmc" \
			     : "=a" (low), "=d" (high) \
			     : "c" (counter))
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
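
/*
 * Example (an illustrative sketch, not part of the original header): leaf 0
 * returns the vendor string in ebx/edx/ecx, and leaves that take a sub-leaf
 * index in ecx (e.g. leaf 4, deterministic cache parameters on Intel) go
 * through cpuid_count():
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int a, b, c, d;
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);	(vendor string, max leaf)
 *	cpuid_count(4, 0, &a, &b, &c, &d);	(cache parameters, sub-leaf 0)
 */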
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
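
/*
 * Example (an illustrative sketch, not part of the original header):
 * cpuid_eax(0x80000000) returns the highest extended CPUID leaf, and
 * cpuid_edx(1) the basic feature-flag word.
 *
 * The *_on_cpu() variants below perform the MSR access on the given CPU;
 * on non-SMP builds they fall back to the local CPU and 'cpu' is ignored.
 */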
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}

static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}

static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}

static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */

#endif /* __ASSEMBLY__ */

#endif /* X86_64_MSR_H */