/* msr.c */
  1. #include <linux/module.h>
  2. #include <linux/preempt.h>
  3. #include <linux/smp.h>
  4. #include <asm/msr.h>
/*
 * Argument/result block passed to the cross-CPU MSR callbacks.
 * A single instance is shared between the requesting CPU and the
 * IPI handler that runs on the target CPU(s).
 */
struct msr_info {
	u32 msr_no;		/* which MSR to read/write */
	struct msr reg;		/* in/out value for the single-CPU variants */
	struct msr *msrs;	/* per-CPU result array for the *_on_cpus() variants,
				 * NULL for single-CPU calls */
	int off;		/* first CPU in the mask; subtracted from the CPU id
				 * to index msrs[] — assumes a contiguous mask */
	int err;		/* error code filled in by the *_safe() callbacks */
};
  12. static void __rdmsr_on_cpu(void *info)
  13. {
  14. struct msr_info *rv = info;
  15. struct msr *reg;
  16. int this_cpu = raw_smp_processor_id();
  17. if (rv->msrs)
  18. reg = &rv->msrs[this_cpu - rv->off];
  19. else
  20. reg = &rv->reg;
  21. rdmsr(rv->msr_no, reg->l, reg->h);
  22. }
  23. static void __wrmsr_on_cpu(void *info)
  24. {
  25. struct msr_info *rv = info;
  26. struct msr *reg;
  27. int this_cpu = raw_smp_processor_id();
  28. if (rv->msrs)
  29. reg = &rv->msrs[this_cpu - rv->off];
  30. else
  31. reg = &rv->reg;
  32. wrmsr(rv->msr_no, reg->l, reg->h);
  33. }
  34. int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  35. {
  36. int err;
  37. struct msr_info rv;
  38. memset(&rv, 0, sizeof(rv));
  39. rv.msr_no = msr_no;
  40. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  41. *l = rv.reg.l;
  42. *h = rv.reg.h;
  43. return err;
  44. }
  45. EXPORT_SYMBOL(rdmsr_on_cpu);
  46. int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  47. {
  48. int err;
  49. struct msr_info rv;
  50. memset(&rv, 0, sizeof(rv));
  51. rv.msr_no = msr_no;
  52. rv.reg.l = l;
  53. rv.reg.h = h;
  54. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  55. return err;
  56. }
  57. EXPORT_SYMBOL(wrmsr_on_cpu);
  58. /* rdmsr on a bunch of CPUs
  59. *
  60. * @mask: which CPUs
  61. * @msr_no: which MSR
  62. * @msrs: array of MSR values
  63. *
  64. */
  65. void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
  66. {
  67. struct msr_info rv;
  68. int this_cpu;
  69. memset(&rv, 0, sizeof(rv));
  70. rv.off = cpumask_first(mask);
  71. rv.msrs = msrs;
  72. rv.msr_no = msr_no;
  73. this_cpu = get_cpu();
  74. if (cpumask_test_cpu(this_cpu, mask))
  75. __rdmsr_on_cpu(&rv);
  76. smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
  77. put_cpu();
  78. }
  79. EXPORT_SYMBOL(rdmsr_on_cpus);
  80. /*
  81. * wrmsr on a bunch of CPUs
  82. *
  83. * @mask: which CPUs
  84. * @msr_no: which MSR
  85. * @msrs: array of MSR values
  86. *
  87. */
  88. void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
  89. {
  90. struct msr_info rv;
  91. int this_cpu;
  92. memset(&rv, 0, sizeof(rv));
  93. rv.off = cpumask_first(mask);
  94. rv.msrs = msrs;
  95. rv.msr_no = msr_no;
  96. this_cpu = get_cpu();
  97. if (cpumask_test_cpu(this_cpu, mask))
  98. __wrmsr_on_cpu(&rv);
  99. smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
  100. put_cpu();
  101. }
  102. EXPORT_SYMBOL(wrmsr_on_cpus);
  103. /* These "safe" variants are slower and should be used when the target MSR
  104. may not actually exist. */
  105. static void __rdmsr_safe_on_cpu(void *info)
  106. {
  107. struct msr_info *rv = info;
  108. rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
  109. }
  110. static void __wrmsr_safe_on_cpu(void *info)
  111. {
  112. struct msr_info *rv = info;
  113. rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
  114. }
  115. int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  116. {
  117. int err;
  118. struct msr_info rv;
  119. memset(&rv, 0, sizeof(rv));
  120. rv.msr_no = msr_no;
  121. err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
  122. *l = rv.reg.l;
  123. *h = rv.reg.h;
  124. return err ? err : rv.err;
  125. }
  126. EXPORT_SYMBOL(rdmsr_safe_on_cpu);
  127. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  128. {
  129. int err;
  130. struct msr_info rv;
  131. memset(&rv, 0, sizeof(rv));
  132. rv.msr_no = msr_no;
  133. rv.reg.l = l;
  134. rv.reg.h = h;
  135. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  136. return err ? err : rv.err;
  137. }
  138. EXPORT_SYMBOL(wrmsr_safe_on_cpu);
/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
struct msr_regs_info {
	u32 *regs;	/* caller-provided GPR array passed to {rd,wr}msr_safe_regs() */
	int err;	/* result of the safe-regs access; pre-set to -EIO by callers
			 * so a failed cross-call still reports an error */
};
  147. static void __rdmsr_safe_regs_on_cpu(void *info)
  148. {
  149. struct msr_regs_info *rv = info;
  150. rv->err = rdmsr_safe_regs(rv->regs);
  151. }
  152. static void __wrmsr_safe_regs_on_cpu(void *info)
  153. {
  154. struct msr_regs_info *rv = info;
  155. rv->err = wrmsr_safe_regs(rv->regs);
  156. }
  157. int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
  158. {
  159. int err;
  160. struct msr_regs_info rv;
  161. rv.regs = regs;
  162. rv.err = -EIO;
  163. err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
  164. return err ? err : rv.err;
  165. }
  166. EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
  167. int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
  168. {
  169. int err;
  170. struct msr_regs_info rv;
  171. rv.regs = regs;
  172. rv.err = -EIO;
  173. err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
  174. return err ? err : rv.err;
  175. }
  176. EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);