/* p4.c */
  1. /*
  2. * P4 specific Machine Check Exception Reporting
  3. */
  4. #include <linux/kernel.h>
  5. #include <linux/types.h>
  6. #include <linux/init.h>
  7. #include <linux/smp.h>
  8. #include <asm/processor.h>
  9. #include <asm/mce.h>
  10. #include <asm/msr.h>
/*
 * Extended machine-check state as supported by the P4/Xeon family.
 * One u32 per MSR_IA32_MCG_Exx register; the fields are filled in this
 * exact order by intel_get_extended_msrs() below and record the CPU's
 * register state at the time the machine check was raised.
 */
struct intel_mce_extended_msrs {
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u32 esi;
	u32 edi;
	u32 ebp;
	u32 esp;
	u32 eflags;
	u32 eip;
	/* u32 *reserved[]; */	/* further MSRs may follow; not captured */
};
/* Count of extended MCE MSRs from MCG_CAP[23:16]; 0 until set in intel_p4_mcheck_init() */
static int mce_num_extended_msrs;
  26. /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
  27. static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
  28. {
  29. u32 h;
  30. rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
  31. rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
  32. rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
  33. rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
  34. rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
  35. rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
  36. rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
  37. rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
  38. rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
  39. rdmsr(MSR_IA32_MCG_EIP, r->eip, h);
  40. }
/*
 * Machine-check exception handler for P4/Xeon.
 *
 * Reads MCG_STATUS, prints the extended register snapshot when the CPU
 * provides one, then scans every MC bank for a valid error record.
 * Panics if any bank reported processor-context corruption or an
 * uncorrected error / non-restartable state; otherwise clears the
 * logged bank status registers and the MCIP bit so that a subsequent
 * machine check does not cause a shutdown.
 */
static void intel_machine_check(struct pt_regs *regs, long error_code)
{
	u32 alow, ahigh, high, low;
	u32 mcgstl, mcgsth;
	int recover = 1;	/* bit 0: cannot restart; bit 1: context corrupt */
	int i;

	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
	if (mcgstl & (1<<0))	/* Recoverable ? (MCG_STATUS.RIPV set) */
		recover = 0;

	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
		smp_processor_id(), mcgsth, mcgstl);

	if (mce_num_extended_msrs > 0) {
		/* Dump the saved register state from the extended MSRs */
		struct intel_mce_extended_msrs dbg;
		intel_get_extended_msrs(&dbg);
		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
			"\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
			"\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
			smp_processor_id(), dbg.eip, dbg.eflags,
			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx,
			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
	}

	/* MCi_STATUS MSRs are spaced 4 apart: CTL, STATUS, ADDR, MISC per bank */
	for (i = 0; i < nr_mce_banks; i++) {
		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
		if (high & (1<<31)) {	/* VAL: bank holds a valid error */
			char misc[20];
			char addr[24];
			misc[0] = addr[0] = '\0';
			if (high & (1<<29))	/* UC: uncorrected error */
				recover |= 1;
			if (high & (1<<25))	/* PCC: processor context corrupt */
				recover |= 2;
			high &= ~(1<<31);	/* strip VAL for display */
			if (high & (1<<27)) {	/* MISCV: MISC register valid */
				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
				snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
			}
			if (high & (1<<26)) {	/* ADDRV: ADDR register valid */
				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
				snprintf(addr, 24, " at %08x%08x", ahigh, alow);
			}
			printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
				smp_processor_id(), i, high, low, misc, addr);
		}
	}

	if (recover & 2)
		panic("CPU context corrupt");
	if (recover & 1)
		panic("Unable to continue");

	printk(KERN_EMERG "Attempting to continue.\n");

	/*
	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
	 * recoverable/continuable.This will allow BIOS to look at the MSRs
	 * for errors if the OS could not log the error.
	 * (We only reach this point when no panic was taken above.)
	 */
	for (i = 0; i < nr_mce_banks; i++) {
		u32 msr;
		msr = MSR_IA32_MC0_STATUS+i*4;
		rdmsr(msr, low, high);
		if (high&(1<<31)) {	/* only touch banks that logged an error */
			/* Clear it */
			wrmsr(msr, 0UL, 0UL);
			/* Serialize */
			wmb();
			add_taint(TAINT_MACHINE_CHECK);
		}
	}

	/* Clear MCG_STATUS.MCIP so further machine checks can be taken */
	mcgstl &= ~(1<<2);
	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
}
/*
 * Enable machine-check reporting on a P4/Xeon CPU: install the MCE
 * handler, enable all reporting banks and clear any stale bank status,
 * set CR4.MCE, then probe MCG_CAP for the extended MCE MSR block (and,
 * when configured, the thermal monitor).
 */
void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	machine_check_vector = intel_machine_check;
	/* Ensure the handler pointer is visible before MCEs are enabled */
	wmb();
	printk(KERN_INFO "Intel machine check architecture supported.\n");

	rdmsr(MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8)) /* Control register present ? (MCG_CTL_P) */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;	/* MCG_CAP[7:0]: number of MC banks */

	/* Enable every error source in every bank and clear stale status */
	for (i = 0; i < nr_mce_banks; i++) {
		wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	/* Banks are set up; now actually enable machine-check exceptions */
	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());

	/* Check for P4/Xeon extended MCE MSRs */
	rdmsr(MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<9)) {/* MCG_EXT_P */
		mce_num_extended_msrs = (l >> 16) & 0xff;	/* MCG_CAP[23:16] */
		printk(KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
			" available\n",
			smp_processor_id(), mce_num_extended_msrs);
#ifdef CONFIG_X86_MCE_P4THERMAL
		/* Check for P4/Xeon Thermal monitor */
		intel_init_thermal(c);
#endif
	}
}