/*
 * msr-on-cpu.c: read/write MSRs on an arbitrary CPU via cross-CPU call.
 */
  1. #include <linux/module.h>
  2. #include <linux/preempt.h>
  3. #include <linux/smp.h>
  4. #include <asm/msr.h>
/*
 * Argument block passed by pointer through smp_call_function_single()
 * to the IPI callbacks below: carries the MSR number in and the
 * value (and, for the *_safe variants, the error code) back out.
 */
struct msr_info {
	u32 msr_no;	/* MSR index to read or write */
	u32 l, h;	/* low/high 32 bits of the MSR value */
	int err;	/* set only by the *_safe callbacks */
};
  10. static void __rdmsr_on_cpu(void *info)
  11. {
  12. struct msr_info *rv = info;
  13. rdmsr(rv->msr_no, rv->l, rv->h);
  14. }
  15. static void __rdmsr_safe_on_cpu(void *info)
  16. {
  17. struct msr_info *rv = info;
  18. rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
  19. }
  20. static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
  21. {
  22. int err = 0;
  23. preempt_disable();
  24. if (smp_processor_id() == cpu)
  25. if (safe)
  26. err = rdmsr_safe(msr_no, l, h);
  27. else
  28. rdmsr(msr_no, *l, *h);
  29. else {
  30. struct msr_info rv;
  31. rv.msr_no = msr_no;
  32. if (safe) {
  33. smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
  34. &rv, 0, 1);
  35. err = rv.err;
  36. } else {
  37. smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
  38. }
  39. *l = rv.l;
  40. *h = rv.h;
  41. }
  42. preempt_enable();
  43. return err;
  44. }
  45. static void __wrmsr_on_cpu(void *info)
  46. {
  47. struct msr_info *rv = info;
  48. wrmsr(rv->msr_no, rv->l, rv->h);
  49. }
  50. static void __wrmsr_safe_on_cpu(void *info)
  51. {
  52. struct msr_info *rv = info;
  53. rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
  54. }
  55. static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
  56. {
  57. int err = 0;
  58. preempt_disable();
  59. if (smp_processor_id() == cpu)
  60. if (safe)
  61. err = wrmsr_safe(msr_no, l, h);
  62. else
  63. wrmsr(msr_no, l, h);
  64. else {
  65. struct msr_info rv;
  66. rv.msr_no = msr_no;
  67. rv.l = l;
  68. rv.h = h;
  69. if (safe) {
  70. smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
  71. &rv, 0, 1);
  72. err = rv.err;
  73. } else {
  74. smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
  75. }
  76. }
  77. preempt_enable();
  78. return err;
  79. }
  80. void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  81. {
  82. _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
  83. }
  84. void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  85. {
  86. _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
  87. }
  88. /* These "safe" variants are slower and should be used when the target MSR
  89. may not actually exist. */
  90. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  91. {
  92. return _wrmsr_on_cpu(cpu, msr_no, l, h, 1);
  93. }
  94. int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  95. {
  96. return _rdmsr_on_cpu(cpu, msr_no, l, h, 1);
  97. }
/* Export the cross-CPU MSR accessors for use by loadable modules. */
EXPORT_SYMBOL(rdmsr_on_cpu);
EXPORT_SYMBOL(wrmsr_on_cpu);
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
EXPORT_SYMBOL(wrmsr_safe_on_cpu);