msr.c 4.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218
  1. #include <linux/module.h>
  2. #include <linux/preempt.h>
  3. #include <linux/smp.h>
  4. #include <asm/msr.h>
/*
 * Shared argument block passed (via the void *info pointer) to the
 * IPI callbacks below.
 */
struct msr_info {
	u32 msr_no;		/* which MSR to access */
	struct msr reg;		/* in/out value for the single-CPU variants */
	struct msr *msrs;	/* per-CPU result array for the _on_cpus variants,
				 * or NULL for the single-CPU variants */
	int off;		/* first CPU in the mask; bias when indexing msrs[] */
	int err;		/* access result, set only by the _safe callbacks */
};
  12. static void __rdmsr_on_cpu(void *info)
  13. {
  14. struct msr_info *rv = info;
  15. struct msr *reg;
  16. int this_cpu = raw_smp_processor_id();
  17. if (rv->msrs)
  18. reg = &rv->msrs[this_cpu - rv->off];
  19. else
  20. reg = &rv->reg;
  21. rdmsr(rv->msr_no, reg->l, reg->h);
  22. }
  23. static void __wrmsr_on_cpu(void *info)
  24. {
  25. struct msr_info *rv = info;
  26. struct msr *reg;
  27. int this_cpu = raw_smp_processor_id();
  28. if (rv->msrs)
  29. reg = &rv->msrs[this_cpu - rv->off];
  30. else
  31. reg = &rv->reg;
  32. wrmsr(rv->msr_no, reg->l, reg->h);
  33. }
  34. int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  35. {
  36. int err;
  37. struct msr_info rv;
  38. memset(&rv, 0, sizeof(rv));
  39. rv.msr_no = msr_no;
  40. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  41. *l = rv.reg.l;
  42. *h = rv.reg.h;
  43. return err;
  44. }
  45. EXPORT_SYMBOL(rdmsr_on_cpu);
  46. int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  47. {
  48. int err;
  49. struct msr_info rv;
  50. memset(&rv, 0, sizeof(rv));
  51. rv.msr_no = msr_no;
  52. rv.reg.l = l;
  53. rv.reg.h = h;
  54. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  55. return err;
  56. }
  57. EXPORT_SYMBOL(wrmsr_on_cpu);
  58. static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
  59. struct msr *msrs,
  60. void (*msr_func) (void *info))
  61. {
  62. struct msr_info rv;
  63. int this_cpu;
  64. memset(&rv, 0, sizeof(rv));
  65. rv.off = cpumask_first(mask);
  66. rv.msrs = msrs;
  67. rv.msr_no = msr_no;
  68. this_cpu = get_cpu();
  69. if (cpumask_test_cpu(this_cpu, mask))
  70. msr_func(&rv);
  71. smp_call_function_many(mask, msr_func, &rv, 1);
  72. put_cpu();
  73. }
/*
 * rdmsr_on_cpus - rdmsr on a bunch of CPUs
 *
 * @mask: which CPUs
 * @msr_no: which MSR
 * @msrs: array of MSR values, indexed by CPU offset within @mask
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
/*
 * wrmsr_on_cpus - wrmsr on a bunch of CPUs
 *
 * @mask: which CPUs
 * @msr_no: which MSR
 * @msrs: array of MSR values, indexed by CPU offset within @mask
 *
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
  99. /* These "safe" variants are slower and should be used when the target MSR
  100. may not actually exist. */
  101. static void __rdmsr_safe_on_cpu(void *info)
  102. {
  103. struct msr_info *rv = info;
  104. rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
  105. }
  106. static void __wrmsr_safe_on_cpu(void *info)
  107. {
  108. struct msr_info *rv = info;
  109. rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
  110. }
  111. int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  112. {
  113. int err;
  114. struct msr_info rv;
  115. memset(&rv, 0, sizeof(rv));
  116. rv.msr_no = msr_no;
  117. err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
  118. *l = rv.reg.l;
  119. *h = rv.reg.h;
  120. return err ? err : rv.err;
  121. }
  122. EXPORT_SYMBOL(rdmsr_safe_on_cpu);
  123. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  124. {
  125. int err;
  126. struct msr_info rv;
  127. memset(&rv, 0, sizeof(rv));
  128. rv.msr_no = msr_no;
  129. rv.reg.l = l;
  130. rv.reg.h = h;
  131. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  132. return err ? err : rv.err;
  133. }
  134. EXPORT_SYMBOL(wrmsr_safe_on_cpu);
/*
 * These variants are significantly slower, but allows control over
 * the entire 32-bit GPR set.
 */
struct msr_regs_info {
	u32 *regs;	/* caller-supplied GPR array passed to *msr_safe_regs() */
	int err;	/* access result from the target CPU */
};
  143. static void __rdmsr_safe_regs_on_cpu(void *info)
  144. {
  145. struct msr_regs_info *rv = info;
  146. rv->err = rdmsr_safe_regs(rv->regs);
  147. }
  148. static void __wrmsr_safe_regs_on_cpu(void *info)
  149. {
  150. struct msr_regs_info *rv = info;
  151. rv->err = wrmsr_safe_regs(rv->regs);
  152. }
  153. int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
  154. {
  155. int err;
  156. struct msr_regs_info rv;
  157. rv.regs = regs;
  158. rv.err = -EIO;
  159. err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
  160. return err ? err : rv.err;
  161. }
  162. EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
  163. int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
  164. {
  165. int err;
  166. struct msr_regs_info rv;
  167. rv.regs = regs;
  168. rv.err = -EIO;
  169. err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
  170. return err ? err : rv.err;
  171. }
  172. EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);