msr.h

#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifdef __i386__

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/errno.h>

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	unsigned long long val;

	asm volatile("2: rdmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: movl %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     " .align 4\n\t"
		     " .long 2b,3b\n\t"
		     ".previous"
		     : "=r" (*err), "=A" (val)
		     : "c" (msr), "i" (-EFAULT));

	return val;
}
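/*
 * Usage sketch (illustrative only, not part of this header): the _safe
 * variant relies on the exception-table fixup above, so reading an MSR
 * that does not exist on the running CPU returns -EFAULT instead of
 * taking a fatal #GP. The MSR number 0x1b (IA32_APIC_BASE) is just an
 * example.
 *
 *	int err;
 *	unsigned long long v = native_read_msr_safe(0x1b, &err);
 *	if (err)
 *		printk(KERN_WARNING "MSR 0x1b not readable\n");
 */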
static inline void native_write_msr(unsigned int msr, unsigned long long val)
{
	asm volatile("wrmsr" : : "c" (msr), "A" (val));
}

static inline int native_write_msr_safe(unsigned int msr,
					unsigned long long val)
{
	int err;

	asm volatile("2: wrmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: movl %4,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     " .align 4\n\t"
		     " .long 2b,3b\n\t"
		     ".previous"
		     : "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
		       "i" (-EFAULT));

	return err;
}
static inline unsigned long long native_read_tsc(void)
{
	unsigned long long val;

	asm volatile("rdtsc" : "=A" (val));
	return val;
}

/* rdpmc reads the performance counter selected by ECX, so the counter
 * number must be passed through; without it the instruction would read
 * whatever counter ECX happens to select. */
static inline unsigned long long native_read_pmc(int counter)
{
	unsigned long long val;

	asm volatile("rdpmc" : "=A" (val) : "c" (counter));
	return val;
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr,val1,val2)				\
	do {						\
		u64 __val = native_read_msr(msr);	\
		(val1) = (u32)__val;			\
		(val2) = (u32)(__val >> 32);		\
	} while (0)

static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
{
	native_write_msr(__msr, ((u64)__high << 32) | __low);
}

#define rdmsrl(msr,val)					\
	((val) = native_read_msr(msr))

#define wrmsrl(msr,val) native_write_msr(msr, val)

/* wrmsr with exception handling */
static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
{
	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr,p1,p2)					\
	({							\
		int __err;					\
		u64 __val = native_read_msr_safe(msr, &__err);	\
		(*p1) = (u32)__val;				\
		(*p2) = (u32)(__val >> 32);			\
		__err;						\
	})
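/*
 * Usage sketch (illustrative only): on 32-bit the value comes back as two
 * u32 halves, so a read-modify-write of an MSR looks like this. 0x1b
 * (IA32_APIC_BASE) is just an example number, and the bit chosen is
 * hypothetical.
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(0x1b, &lo, &hi) == 0) {
 *		lo |= (1 << 11);	// hypothetical bit to set
 *		wrmsr(0x1b, lo, hi);
 *	}
 */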
#define rdtscl(low)					\
	((low) = (u32)native_read_tsc())

#define rdtscll(val)					\
	((val) = native_read_tsc())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* MSR 0x10 is the TSC */

#define rdpmc(counter,low,high)				\
	do {						\
		u64 _l = native_read_pmc(counter);	\
		(low)  = (u32)_l;			\
		(high) = (u32)(_l >> 32);		\
	} while (0)
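/*
 * Usage sketch (illustrative only): rdtscll() is the usual way to grab
 * the full 64-bit timestamp counter in one go.
 *
 *	unsigned long long t0, t1;
 *	rdtscll(t0);
 *	do_work();		// hypothetical workload
 *	rdtscll(t1);
 *	printk(KERN_DEBUG "cycles: %llu\n", t1 - t0);
 */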
#endif /* !CONFIG_PARAVIRT */

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */
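/*
 * Usage sketch (illustrative only): the *_on_cpu() helpers perform the
 * MSR access on a specific CPU (cross-calling on SMP); on UP builds they
 * fall through to the plain accessors above.
 *
 *	u32 lo, hi;
 *	rdmsr_on_cpu(0, 0x1b, &lo, &hi);	// read IA32_APIC_BASE on CPU 0
 */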
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (val1), "=d" (val2) \
			     : "c" (msr))

#define rdmsrl(msr,val) do { unsigned long a__,b__; \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (a__), "=d" (b__) \
			     : "c" (msr)); \
	val = a__ | (b__<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__("wrmsr" \
			     : /* no outputs */ \
			     : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
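/*
 * Usage sketch (illustrative only): on x86-64 the 64-bit helpers are the
 * common entry points; wrmsrl() splits the value into the EDX:EAX pair
 * itself. The MSR numbers are examples (0x10 is the TSC, 0xc0000100 is
 * MSR_FS_BASE).
 *
 *	unsigned long tsc, base;
 *	rdmsrl(0x10, tsc);
 *	wrmsrl(0xc0000100, base);	// hypothetical write of FS.base
 */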
#define rdtsc(low,high) \
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

/* .byte 0x0f,0x01,0xf9 is the rdtscp opcode, spelled out for old assemblers */
#define rdtscp(low,high,aux) \
	__asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
	unsigned int __a,__d; \
	__asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define rdtscpll(val, aux) do { \
	unsigned long __a, __d; \
	__asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
	(val) = (__d << 32) | __a; \
} while (0)

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* MSR 0x10 is the TSC */

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)	/* MSR 0xc0000103 is TSC_AUX */
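/*
 * Usage sketch (illustrative only): rdtscp additionally returns the
 * contents of TSC_AUX (typically the CPU number stored there via
 * write_rdtscp_aux()), which lets the caller notice a migration between
 * two reads.
 *
 *	unsigned long t;
 *	unsigned int aux;
 *	rdtscpll(t, aux);
 */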
#define rdpmc(counter,low,high) \
	__asm__ __volatile__("rdpmc" \
			     : "=a" (low), "=d" (high) \
			     : "c" (counter))

static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
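/*
 * Usage sketch (illustrative only): leaf 0 returns the highest supported
 * leaf in EAX and the vendor string in EBX/EDX/ECX.
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid(0, &eax, &ebx, &ecx, &edx);	// ebx:edx:ecx == "GenuineIntel" etc.
 */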
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
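/*
 * Usage sketch (illustrative only): the single-datum helpers are handy
 * for feature tests; CPUID leaf 1 EDX bit 4 is TSC support.
 *
 *	if (cpuid_edx(1) & (1 << 4))
 *		;	// CPU has a timestamp counter
 */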
#ifdef __KERNEL__

/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3: movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     " .align 8\n\t"				\
		     " .quad 2b,3b\n\t"				\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

#define rdmsr_safe(msr,a,b) \
	({ int ret__;						\
	   asm volatile ("1: rdmsr\n"				\
			 "2:\n"					\
			 ".section .fixup,\"ax\"\n"		\
			 "3: movl %4,%0\n"			\
			 "   jmp 2b\n"				\
			 ".previous\n"				\
			 ".section __ex_table,\"a\"\n"		\
			 " .align 8\n"				\
			 " .quad 1b,3b\n"			\
			 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
			 :"c"(msr), "i"(-EIO), "0"(0));		\
	   ret__; })
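/*
 * Usage sketch (illustrative only): checking_wrmsrl() is the 64-bit
 * convenience wrapper around wrmsr_safe(); a non-zero return means the
 * write faulted. 0xc0000100 (MSR_FS_BASE) is just an example target.
 *
 *	if (checking_wrmsrl(0xc0000100, base))
 *		printk(KERN_WARNING "wrmsr failed\n");
 */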
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#endif /* !__i386__ */
#endif /* __ASM_X86_MSR_H_ */