edac_mce_amd.c

#include <linux/module.h>

#include "edac_mce_amd.h"

static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);

void amd_report_gart_errors(bool v)
{
        report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);

void amd_register_ecc_decoder(void (*f)(int, struct err_regs *))
{
        nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *))
{
        if (nb_bus_decoder) {
                WARN_ON(nb_bus_decoder != f);

                nb_bus_decoder = NULL;
        }
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
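/*
 * Illustrative sketch, not part of the original file: a memory controller
 * driver would plug its own northbridge bus error decoder into the hooks
 * above roughly like this. The callback and init/exit names below are
 * hypothetical.
 */
#if 0
static void example_nb_bus_decoder(int node_id, struct err_regs *regs)
{
        /* map regs->nbeal/nbeah to a chip select/DIMM and report it */
}

static int __init example_drv_init(void)
{
        amd_register_ecc_decoder(example_nb_bus_decoder);
        return 0;
}

static void __exit example_drv_exit(void)
{
        amd_unregister_ecc_decoder(example_nb_bus_decoder);
}
#endif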
/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */
const char *tt_msgs[] = {        /* transaction type */
        "instruction",
        "data",
        "generic",
        "reserved"
};
EXPORT_SYMBOL_GPL(tt_msgs);

const char *ll_msgs[] = {        /* cache level */
        "L0",
        "L1",
        "L2",
        "L3/generic"
};
EXPORT_SYMBOL_GPL(ll_msgs);

const char *rrrr_msgs[] = {
        "generic",
        "generic read",
        "generic write",
        "data read",
        "data write",
        "inst fetch",
        "prefetch",
        "evict",
        "snoop",
        "reserved RRRR= 9",
        "reserved RRRR= 10",
        "reserved RRRR= 11",
        "reserved RRRR= 12",
        "reserved RRRR= 13",
        "reserved RRRR= 14",
        "reserved RRRR= 15"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

const char *pp_msgs[] = {        /* participating processor */
        "local node originated (SRC)",
        "local node responded to request (RES)",
        "local node observed as 3rd party (OBS)",
        "generic"
};
EXPORT_SYMBOL_GPL(pp_msgs);

const char *to_msgs[] = {
        "no timeout",
        "timed out"
};
EXPORT_SYMBOL_GPL(to_msgs);

const char *ii_msgs[] = {        /* memory or i/o */
        "mem access",
        "reserved",
        "i/o access",
        "generic"
};
EXPORT_SYMBOL_GPL(ii_msgs);
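/*
 * Added note (error code layout per the AMD BKDG): the tables above index
 * the fields of the 16-bit MCA error code in MCi_STATUS[15:0], whose layout
 * depends on the error class:
 *
 *      TLB errors:             0000 0000 0001 TTLL
 *      memory hierarchy:       0000 0001 RRRR TTLL
 *      bus/interconnect:       0000 1PPT RRRR IILL
 *
 * e.g. ec = 0x0135 is a memory error with RRRR = 0011b ("data read"),
 * TT = 01b ("data") and LL = 01b ("L1").
 */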
/*
 * Map the 4 or 5 (family-specific) bits of Extended Error code to the
 * string table.
 */
const char *ext_msgs[] = {
        "K8 ECC error",                                 /* 0_0000b */
        "CRC error on link",                            /* 0_0001b */
        "Sync error packets on link",                   /* 0_0010b */
        "Master Abort during link operation",           /* 0_0011b */
        "Target Abort during link operation",           /* 0_0100b */
        "Invalid GART PTE entry during table walk",     /* 0_0101b */
        "Unsupported atomic RMW command received",      /* 0_0110b */
        "WDT error: NB transaction timeout",            /* 0_0111b */
        "ECC/ChipKill ECC error",                       /* 0_1000b */
        "SVM DEV Error",                                /* 0_1001b */
        "Link Data error",                              /* 0_1010b */
        "Link/L3/Probe Filter Protocol error",          /* 0_1011b */
        "NB Internal Arrays Parity error",              /* 0_1100b */
        "DRAM Address/Control Parity error",            /* 0_1101b */
        "Link Transmission error",                      /* 0_1110b */
        "GART/DEV Table Walk Data error",               /* 0_1111b */
        "Res 0x100 error",                              /* 1_0000b */
        "Res 0x101 error",                              /* 1_0001b */
        "Res 0x102 error",                              /* 1_0010b */
        "Res 0x103 error",                              /* 1_0011b */
        "Res 0x104 error",                              /* 1_0100b */
        "Res 0x105 error",                              /* 1_0101b */
        "Res 0x106 error",                              /* 1_0110b */
        "Res 0x107 error",                              /* 1_0111b */
        "Res 0x108 error",                              /* 1_1000b */
        "Res 0x109 error",                              /* 1_1001b */
        "Res 0x10A error",                              /* 1_1010b */
        "Res 0x10B error",                              /* 1_1011b */
        "ECC error in L3 Cache Data",                   /* 1_1100b */
        "L3 Cache Tag error",                           /* 1_1101b */
        "L3 Cache LRU Parity error",                    /* 1_1110b */
        "Probe Filter error"                            /* 1_1111b */
};
EXPORT_SYMBOL_GPL(ext_msgs);
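/*
 * Added note: the bank decoders below read the extended error code as
 * MCi_STATUS[19:16]; for the NB bank, EXT_ERROR_CODE() from the companion
 * header (not shown here) presumably covers the full 5-bit field on
 * families that implement it, which is what makes the upper half of this
 * table reachable.
 */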
static void amd_decode_dc_mce(u64 mc0_status)
{
        u32 ec  = mc0_status & 0xffff;
        u32 xec = (mc0_status >> 16) & 0xf;

        pr_emerg(" Data Cache Error");

        if (xec == 1 && TLB_ERROR(ec))
                pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
        else if (xec == 0) {
                if (mc0_status & (1ULL << 40))
                        pr_cont(" during Data Scrub.\n");
                else if (TLB_ERROR(ec))
                        pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
                else if (MEM_ERROR(ec)) {
                        u8 ll   = ec & 0x3;
                        u8 tt   = (ec >> 2) & 0x3;
                        u8 rrrr = (ec >> 4) & 0xf;

                        /* see F10h BKDG (31116), Table 92. */
                        if (ll == 0x1) {
                                if (tt != 0x1)
                                        goto wrong_dc_mce;

                                pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));

                        } else if (ll == 0x2 && rrrr == 0x3)
                                pr_cont(" during L1 linefill from L2.\n");
                        else
                                goto wrong_dc_mce;
                } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
                        pr_cont(" during system linefill.\n");
                else
                        goto wrong_dc_mce;
        } else
                goto wrong_dc_mce;

        return;

wrong_dc_mce:
        pr_warning("Corrupted DC MCE info?\n");
}
static void amd_decode_ic_mce(u64 mc1_status)
{
        u32 ec  = mc1_status & 0xffff;
        u32 xec = (mc1_status >> 16) & 0xf;

        pr_emerg(" Instruction Cache Error");

        if (xec == 1 && TLB_ERROR(ec))
                pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
        else if (xec == 0) {
                if (TLB_ERROR(ec))
                        pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
                else if (BUS_ERROR(ec)) {
                        if (boot_cpu_data.x86 == 0xf &&
                            (mc1_status & (1ULL << 58)))
                                pr_cont(" during system linefill.\n");
                        else
                                pr_cont(" during attempted NB data read.\n");
                } else if (MEM_ERROR(ec)) {
                        u8 ll   = ec & 0x3;
                        u8 rrrr = (ec >> 4) & 0xf;

                        if (ll == 0x2)
                                pr_cont(" during a linefill from L2.\n");
                        else if (ll == 0x1) {

                                switch (rrrr) {
                                case 0x5:
                                        pr_cont(": Parity error during "
                                                "data load.\n");
                                        break;

                                case 0x7:
                                        pr_cont(": Copyback Parity/Victim"
                                                " error.\n");
                                        break;

                                case 0x8:
                                        pr_cont(": Tag Snoop error.\n");
                                        break;

                                default:
                                        goto wrong_ic_mce;
                                        break;
                                }
                        }
                } else
                        goto wrong_ic_mce;
        } else
                goto wrong_ic_mce;

        return;

wrong_ic_mce:
        pr_warning("Corrupted IC MCE info?\n");
}
static void amd_decode_bu_mce(u64 mc2_status)
{
        u32 ec  = mc2_status & 0xffff;
        u32 xec = (mc2_status >> 16) & 0xf;

        pr_emerg(" Bus Unit Error");

        if (xec == 0x1)
                pr_cont(" in the write data buffers.\n");
        else if (xec == 0x3)
                pr_cont(" in the victim data buffers.\n");
        else if (xec == 0x2 && MEM_ERROR(ec))
                pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
        else if (xec == 0x0) {
                if (TLB_ERROR(ec))
                        pr_cont(": %s error in a Page Descriptor Cache or "
                                "Guest TLB.\n", TT_MSG(ec));
                else if (BUS_ERROR(ec))
                        pr_cont(": %s/ECC error in data read from NB: %s.\n",
                                RRRR_MSG(ec), PP_MSG(ec));
                else if (MEM_ERROR(ec)) {
                        u8 rrrr = (ec >> 4) & 0xf;

                        if (rrrr >= 0x7)
                                pr_cont(": %s error during data copyback.\n",
                                        RRRR_MSG(ec));
                        else if (rrrr <= 0x1)
                                pr_cont(": %s parity/ECC error during data "
                                        "access from L2.\n", RRRR_MSG(ec));
                        else
                                goto wrong_bu_mce;
                } else
                        goto wrong_bu_mce;
        } else
                goto wrong_bu_mce;

        return;

wrong_bu_mce:
        pr_warning("Corrupted BU MCE info?\n");
}
static void amd_decode_ls_mce(u64 mc3_status)
{
        u32 ec  = mc3_status & 0xffff;
        u32 xec = (mc3_status >> 16) & 0xf;

        pr_emerg(" Load Store Error");

        if (xec == 0x0) {
                u8 rrrr = (ec >> 4) & 0xf;

                if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
                        goto wrong_ls_mce;

                pr_cont(" during %s.\n", RRRR_MSG(ec));
        }

        return;

wrong_ls_mce:
        pr_warning("Corrupted LS MCE info?\n");
}
void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
{
        u32 ec  = ERROR_CODE(regs->nbsl);
        u32 xec = EXT_ERROR_CODE(regs->nbsl);

        if (!handle_errors)
                return;

        pr_emerg(" Northbridge Error, node %d", node_id);

        /*
         * F10h, revD can disable ErrCpu[3:0] so check that first and also the
         * value encoding has changed so interpret those differently
         */
        if ((boot_cpu_data.x86 == 0x10) &&
            (boot_cpu_data.x86_model > 7)) {
                if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
                        pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
        } else {
                pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
        }

        pr_emerg("%s.\n", EXT_ERR_MSG(xec));

        if (BUS_ERROR(ec) && nb_bus_decoder)
                nb_bus_decoder(node_id, regs);
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
static void amd_decode_fr_mce(u64 mc5_status)
{
        /* we have only one error signature so match all fields at once. */
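        /*
         * Added note: 0x0f0f is the bus error encoding 0000 1PPT RRRR IILL
         * with PP, II and LL all "generic", RRRR "generic" and the timeout
         * bit set, i.e. the CPU watchdog timeout signature.
         */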
        if ((mc5_status & 0xffff) == 0x0f0f)
                pr_emerg(" FR Error: CPU Watchdog timer expire.\n");
        else
                pr_warning("Corrupted FR MCE info?\n");
}
static inline void amd_decode_err_code(unsigned int ec)
{
        if (TLB_ERROR(ec)) {
                /*
                 * GART errors are intended to help graphics driver developers
                 * to detect bad GART PTEs. It is recommended by AMD to disable
                 * GART table walk error reporting by default[1] (currently
                 * being disabled in mce_cpu_quirks()) and according to the
                 * comment in mce_cpu_quirks(), such GART errors can be
                 * incorrectly triggered. We may see these errors anyway and
                 * unless requested by the user, they won't be reported.
                 *
                 * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
                 * AMD NPT family 0Fh processors
                 */
                if (!report_gart_errors)
                        return;

                pr_emerg(" Transaction: %s, Cache Level %s\n",
                         TT_MSG(ec), LL_MSG(ec));
        } else if (MEM_ERROR(ec)) {
                pr_emerg(" Transaction: %s, Type: %s, Cache Level: %s",
                         RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
        } else if (BUS_ERROR(ec)) {
                pr_emerg(" Transaction type: %s(%s), %s, Cache Level: %s, "
                         "Participating Processor: %s\n",
                         RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
                         PP_MSG(ec));
        } else
                pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
}
static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
                          void *data)
{
        struct mce *m = (struct mce *)data;
        struct err_regs regs;
        int node, ecc;

        pr_emerg("MC%d_STATUS: ", m->bank);

        pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
                "CPU context corrupt: %s",
                ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
                ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
                ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
                ((m->status & MCI_STATUS_PCC)   ? "yes" : "no"));
        /* do the two ECC status bits [46:45] together */
        ecc = (m->status >> 45) & 0x3;
        if (ecc)
                pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
        pr_cont("\n");

        switch (m->bank) {
        case 0:
                amd_decode_dc_mce(m->status);
                break;

        case 1:
                amd_decode_ic_mce(m->status);
                break;

        case 2:
                amd_decode_bu_mce(m->status);
                break;

        case 3:
                amd_decode_ls_mce(m->status);
                break;

        case 4:
                regs.nbsl  = (u32) m->status;
                regs.nbsh  = (u32)(m->status >> 32);
                regs.nbeal = (u32) m->addr;
                regs.nbeah = (u32)(m->addr >> 32);
                node       = amd_get_nb_id(m->extcpu);

                amd_decode_nb_mce(node, &regs, 1);
                break;

        case 5:
                amd_decode_fr_mce(m->status);
                break;

        default:
                break;
        }

        amd_decode_err_code(m->status & 0xffff);

        return NOTIFY_STOP;
}
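/*
 * Added note: for a corrected NB ECC error reported in bank 4, the notifier
 * above would print something roughly along these lines (the exact fields
 * depend on the particular MCE):
 *
 *      MC4_STATUS: Corrected error, report: yes, MiscV: valid,
 *      CPU context corrupt: no, CECC Error
 *       Northbridge Error, node 0, core: 1
 *      ECC/ChipKill ECC error.
 *
 * followed by the transaction/cache-level fields decoded by
 * amd_decode_err_code().
 */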
static struct notifier_block amd_mce_dec_nb = {
        .notifier_call = amd_decode_mce,
};

static int __init mce_amd_init(void)
{
        /*
         * We can decode MCEs for Opteron and later CPUs:
         */
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
            (boot_cpu_data.x86 >= 0xf))
                atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);

        return 0;
}
early_initcall(mce_amd_init);

#ifdef MODULE
static void __exit mce_amd_exit(void)
{
        atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif