msr.c

#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>

struct msr_info {
	u32 msr_no;		/* which MSR to access */
	struct msr reg;		/* in/out value for the single-CPU calls */
	struct msr *msrs;	/* optional per-CPU array for the *_on_cpus() calls */
	int err;		/* result of the "safe" variants */
};
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
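
/*
 * Usage sketch (hypothetical caller, not part of this file): read and
 * write an MSR on CPU 1. Only the IPI error is returned here; the
 * access itself must not fault, otherwise use the *_safe variants
 * further below.
 *
 *	u32 lo, hi;
 *
 *	if (!rdmsr_on_cpu(1, MSR_IA32_MISC_ENABLE, &lo, &hi))
 *		wrmsr_on_cpu(1, MSR_IA32_MISC_ENABLE, lo, hi);
 */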
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	/*
	 * smp_call_function_many() does not run the function on the
	 * calling CPU, so handle it here while preemption is disabled.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
struct msr *msrs_alloc(void)
{
	struct msr *msrs = NULL;

	msrs = alloc_percpu(struct msr);
	if (!msrs) {
		pr_warning("%s: error allocating msrs\n", __func__);
		return NULL;
	}

	return msrs;
}
EXPORT_SYMBOL(msrs_alloc);

void msrs_free(struct msr *msrs)
{
	free_percpu(msrs);
}
EXPORT_SYMBOL(msrs_free);
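
/*
 * Usage sketch (hypothetical caller, not part of this file): sample an
 * MSR across all online CPUs via the per-CPU helpers above. Each CPU's
 * value lands in its own slot of the percpu array, read back with
 * per_cpu_ptr().
 *
 *	struct msr *msrs = msrs_alloc();
 *	int cpu;
 *
 *	if (msrs) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MISC_ENABLE, msrs);
 *		for_each_online_cpu(cpu)
 *			pr_info("CPU%d: 0x%llx\n", cpu,
 *				(unsigned long long)per_cpu_ptr(msrs, cpu)->q);
 *		msrs_free(msrs);
 *	}
 */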
/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
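
/*
 * Usage sketch (hypothetical caller, not part of this file): probe an
 * MSR that may be absent on the target CPU. A fault in the handler is
 * caught and reported through rv.err instead of crashing, so a nonzero
 * return simply means the MSR could not be read.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe_on_cpu(0, MSR_AMD64_NB_CFG, &lo, &hi))
 *		pr_info("MSR not readable on CPU 0\n");
 */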
/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
struct msr_regs_info {
	u32 *regs;
	int err;
};

static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
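
/*
 * Usage sketch (hypothetical caller, not part of this file): read an
 * MSR with full GPR control. The regs[] layout assumed here follows
 * arch/x86/lib/msr-reg.S: index 0 = eax, 1 = ecx (MSR number),
 * 2 = edx, 3 = ebx, 4 = esp (unused), 5 = ebp, 6 = esi, 7 = edi.
 *
 *	u32 regs[8] = { 0 };
 *
 *	regs[1] = msr_no;	// ecx selects the MSR
 *	if (!rdmsr_safe_regs_on_cpu(0, regs))
 *		pr_info("value: %#010x%08x\n", regs[2], regs[0]);
 */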