@@ -6,21 +6,37 @@
 struct msr_info {
 	u32 msr_no;
 	struct msr reg;
+	struct msr *msrs;
+	int off;
 	int err;
 };
 
 static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
 
-	rdmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
+
+	rdmsr(rv->msr_no, reg->l, reg->h);
 }
 
 static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
 
-	wrmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	wrmsr(rv->msr_no, reg->l, reg->h);
 }
 
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -28,6 +44,8 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	*l = rv.reg.l;
@@ -35,12 +53,15 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 
 	return err;
 }
+EXPORT_SYMBOL(rdmsr_on_cpu);
 
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
@@ -48,6 +69,70 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 	return err;
 }
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
 
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
@@ -70,6 +155,8 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 	*l = rv.reg.l;
@@ -77,12 +164,15 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 
 	return err ? err : rv.err;
 }
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
@@ -90,8 +180,4 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 	return err ? err : rv.err;
 }
-
-EXPORT_SYMBOL(rdmsr_on_cpu);
-EXPORT_SYMBOL(wrmsr_on_cpu);
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
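
For context, a minimal caller sketch (not part of the patch; the function name msr_demo_init, the MSR number 0x10, and the module boilerplate are illustrative only). It reads MSR 0x10 (the TSC) on every online CPU in one call. Note that the handlers index the result array by (cpu - cpumask_first(mask)), so the array must cover the whole span of the mask; allocating nr_cpu_ids entries is a simple way to guarantee that.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/msr.h>

static int __init msr_demo_init(void)
{
	/* The handlers store each CPU's result at msrs[cpu - off]. */
	unsigned int off = cpumask_first(cpu_online_mask);
	struct msr *results;
	unsigned int cpu;

	/* One slot per possible CPU id covers any online-mask layout. */
	results = kcalloc(nr_cpu_ids, sizeof(*results), GFP_KERNEL);
	if (!results)
		return -ENOMEM;

	/* 0x10 is the TSC MSR, chosen as an example; any per-CPU MSR
	 * works the same way. */
	rdmsr_on_cpus(cpu_online_mask, 0x10, results);

	for_each_online_cpu(cpu)
		pr_info("cpu%u: msr 0x10 = %#llx\n", cpu,
			((u64)results[cpu - off].h << 32) |
			results[cpu - off].l);

	kfree(results);
	return 0;
}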