@@ -124,13 +124,10 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 
 /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
-static inline int intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
+static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 {
 	u32 h;
 
-	if (mce_num_extended_msrs == 0)
-		goto done;
-
 	rdmsr (MSR_IA32_MCG_EAX, r->eax, h);
 	rdmsr (MSR_IA32_MCG_EBX, r->ebx, h);
 	rdmsr (MSR_IA32_MCG_ECX, r->ecx, h);
 	rdmsr (MSR_IA32_MCG_EDX, r->edx, h);
@@ -141,12 +138,6 @@ static inline int intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 	rdmsr (MSR_IA32_MCG_ESP, r->esp, h);
 	rdmsr (MSR_IA32_MCG_EFLAGS, r->eflags, h);
 	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
-
-	/* can we rely on kmalloc to do a dynamic
-	 * allocation for the reserved registers?
-	 */
-done:
-	return mce_num_extended_msrs;
 }
 
 static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
@@ -155,7 +146,6 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
 	int i;
-	struct intel_mce_extended_msrs dbg;
 
 	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 	if (mcgstl & (1<<0))	/* Recoverable ? */
@@ -164,7 +154,9 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
 	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
 		smp_processor_id(), mcgsth, mcgstl);
 
-	if (intel_get_extended_msrs(&dbg)) {
+	if (mce_num_extended_msrs > 0) {
+		struct intel_mce_extended_msrs dbg;
+		intel_get_extended_msrs(&dbg);
 		printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
 			smp_processor_id(), dbg.eip, dbg.eflags);
 		printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",