msr.h
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/ioctl.h>

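/*
 * These ioctls are used by the x86 MSR character device (/dev/cpu/<n>/msr)
 * to read or write an MSR together with a full eight-register set, as
 * needed for MSR accesses that require extra registers (e.g. AMD-specific
 * ones handled by the *_safe_regs() helpers below).
 */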
#define X86_IOC_RDMSR_REGS      _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS      _IOWR('c', 0xA1, __u32[8])
#ifdef __KERNEL__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
struct msr {
        union {
                struct {
                        u32 l;
                        u32 h;
                };
                u64 q;
        };
};

struct msr_info {
        u32 msr_no;
        struct msr reg;
        struct msr *msrs;
        int err;
};

struct msr_regs_info {
        u32 *regs;
        int err;
};
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
        unsigned long low, high;

        /* 0x0f, 0x01, 0xf9 is the opcode for RDTSCP */
        asm volatile(".byte 0x0f,0x01,0xf9"
                     : "=a" (low), "=d" (high), "=c" (*aux));
        return low | ((u64)high << 32);
}
/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high)    unsigned low, high
#define EAX_EDX_VAL(val, low, high)     ((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high)    "a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high)     "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)    unsigned long long val
#define EAX_EDX_VAL(val, low, high)     (val)
#define EAX_EDX_ARGS(val, low, high)    "A" (val)
#define EAX_EDX_RET(val, low, high)     "=A" (val)
#endif
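/*
 * For illustration, on x86_64 a 64-bit MSR read built from these macros
 * expands to roughly:
 *
 *      unsigned low, high;
 *      asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *      return low | ((u64)high << 32);
 *
 * while on i386 the "=A" constraint lets gcc return the value directly
 * in edx:eax as a single unsigned long long.
 */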
static inline unsigned long long native_read_msr(unsigned int msr)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
        return EAX_EDX_VAL(val, low, high);
}
static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), [fault] "i" (-EIO));
        return EAX_EDX_VAL(val, low, high);
}
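/*
 * The "safe" variants rely on the kernel exception-table mechanism:
 * _ASM_EXTABLE(2b, 3b) records that a fault on the rdmsr/wrmsr at label 2
 * resumes at the fixup code at label 3, which stores -EIO in the error
 * output and jumps back to label 1.  A #GP caused by accessing a
 * non-existent MSR therefore becomes an error return instead of an oops.
 */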
static inline void native_write_msr(unsigned int msr,
                                    unsigned low, unsigned high)
{
        asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
                                                unsigned low, unsigned high)
{
        int err;

        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
        return err;
}
extern unsigned long long native_read_tsc(void);

extern int native_rdmsr_safe_regs(u32 regs[8]);
extern int native_wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long __native_read_tsc(void)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

        return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_pmc(int counter)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
        return EAX_EDX_VAL(val, low, high);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the output parameters directly (without
 * using pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr, val1, val2)                                  \
do {                                                            \
        u64 __val = native_read_msr((msr));                     \
        (val1) = (u32)__val;                                    \
        (val2) = (u32)(__val >> 32);                            \
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
        native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)                        \
        ((val) = native_read_msr((msr)))

#define wrmsrl(msr, val)                                                \
        native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
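/*
 * Typical usage, taking MSR_IA32_TSC (also used by write_tsc() below) as an
 * example: rdmsrl() reads the full 64-bit value, rdmsr() splits it into
 * low/high 32-bit halves, and wrmsrl() writes a 64-bit value back:
 *
 *      u64 tsc;
 *      u32 lo, hi;
 *
 *      rdmsrl(MSR_IA32_TSC, tsc);
 *      rdmsr(MSR_IA32_TSC, lo, hi);
 *      wrmsrl(MSR_IA32_TSC, tsc);
 */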
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
        return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, p1, p2)                                 \
({                                                              \
        int __err;                                              \
        u64 __val = native_read_msr_safe((msr), &__err);        \
        (*p1) = (u32)__val;                                     \
        (*p2) = (u32)(__val >> 32);                             \
        __err;                                                  \
})
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = native_read_msr_safe(msr, &err);
        return err;
}
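/*
 * The *_amd_safe() helpers below go through the full-register-set variants:
 * as the code shows, gprs[1] carries the MSR number (%ecx), the 64-bit value
 * lives in gprs[0]/gprs[2] (%eax/%edx), and gprs[7] (%edi) is loaded with
 * 0x9c5a203a, the magic key AMD CPUs expect before granting access to
 * certain vendor-specific MSRs.
 */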
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = native_rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return native_wrmsr_safe_regs(gprs);
}

static inline int rdmsr_safe_regs(u32 regs[8])
{
        return native_rdmsr_safe_regs(regs);
}

static inline int wrmsr_safe_regs(u32 regs[8])
{
        return native_wrmsr_safe_regs(regs);
}
#define rdtscl(low)                                             \
        ((low) = (u32)__native_read_tsc())

#define rdtscll(val)                                            \
        ((val) = __native_read_tsc())

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = native_read_pmc((counter));            \
        (low)  = (u32)_l;                               \
        (high) = (u32)(_l >> 32);                       \
} while (0)

#define rdtscp(low, high, aux)                                  \
do {                                                            \
        unsigned long long _val = native_read_tscp(&(aux));    \
        (low) = (u32)_val;                                      \
        (high) = (u32)(_val >> 32);                             \
} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

#endif  /* !CONFIG_PARAVIRT */
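/*
 * The rdtsc*() helpers follow the same pattern, e.g. reading the full TSC
 * plus the auxiliary value set via write_rdtscp_aux() below:
 *
 *      unsigned long long t;
 *      unsigned int cpu_id;
 *
 *      rdtscll(t);
 *      rdtscpll(t, cpu_id);
 */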
#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),         \
                                             (u32)((val) >> 32))

#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
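/*
 * The *_on_cpu() variants perform the access on a specific CPU (cross-CPU
 * on SMP), e.g. reading an MSR on CPU 2 and checking for failure:
 *
 *      u32 lo, hi;
 *
 *      if (rdmsr_safe_on_cpu(2, msr_no, &lo, &hi))
 *              return -EIO;
 */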
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
        return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
        return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                    u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */