mce_amd.c

#include <linux/module.h>
#include <linux/slab.h>

#include "mce_amd.h"

static struct amd_decoder_ops *fam_ops;

static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);

void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);
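
/*
 * Usage sketch (hypothetical caller, e.g. an amd64 EDAC driver; not part of
 * this file): GART TLB error decoding stays off unless a consumer explicitly
 * enables it, and should be disabled again on teardown:
 *
 *	amd_report_gart_errors(true);
 *	...
 *	amd_report_gart_errors(false);
 */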
void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	if (nb_bus_decoder) {
		WARN_ON(nb_bus_decoder != f);

		nb_bus_decoder = NULL;
	}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
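
/*
 * Usage sketch (hypothetical driver code, not part of this file): only one
 * decoder slot exists, and unregistering warns if a pointer other than the
 * registered one is passed in:
 *
 *	static void my_nb_decoder(int node_id, struct mce *m, u32 nbcfg)
 *	{
 *		... decode DRAM ECC info from m->status / m->addr ...
 *	}
 *
 *	amd_register_ecc_decoder(my_nb_decoder);
 *	...
 *	amd_unregister_ecc_decoder(my_nb_decoder);
 */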
/*
 * String representation for the different MCA reported error types; see
 * F3x48 or MSR0000_0411.
 */

/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
	"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);
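
/*
 * How the decoders below index these tables: the low 16 bits of MCi_STATUS
 * carry the MCA error code, and the field extraction used throughout this
 * file is
 *
 *	ll = ec & 0x3;		cache level	  -> ll_msgs[ll]
 *	tt = (ec >> 2) & 0x3;	transaction type  -> tt_msgs[tt]
 *	r4 = (ec >> 4) & 0xf;	memory tx type	  -> rrrr_msgs[r4]
 *
 * For bus errors, II (mem/io) reuses the same bit positions as TT, which is
 * why f14h_dc_mce() below sets ii = tt. The LL_MSG()/TT_MSG()/RRRR_MSG()
 * helpers from mce_amd.h are expected to wrap these lookups.
 */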
/*
 * Map the 4 or 5 (family-specific) bits of the Extended Error Code to the
 * string table.
 */
const char *ext_msgs[] = {
	"K8 ECC error",					/* 0_0000b */
	"CRC error on link",				/* 0_0001b */
	"Sync error packets on link",			/* 0_0010b */
	"Master Abort during link operation",		/* 0_0011b */
	"Target Abort during link operation",		/* 0_0100b */
	"Invalid GART PTE entry during table walk",	/* 0_0101b */
	"Unsupported atomic RMW command received",	/* 0_0110b */
	"WDT error: NB transaction timeout",		/* 0_0111b */
	"ECC/ChipKill ECC error",			/* 0_1000b */
	"SVM DEV Error",				/* 0_1001b */
	"Link Data error",				/* 0_1010b */
	"Link/L3/Probe Filter Protocol error",		/* 0_1011b */
	"NB Internal Arrays Parity error",		/* 0_1100b */
	"DRAM Address/Control Parity error",		/* 0_1101b */
	"Link Transmission error",			/* 0_1110b */
	"GART/DEV Table Walk Data error",		/* 0_1111b */
	"Res 0x100 error",				/* 1_0000b */
	"Res 0x101 error",				/* 1_0001b */
	"Res 0x102 error",				/* 1_0010b */
	"Res 0x103 error",				/* 1_0011b */
	"Res 0x104 error",				/* 1_0100b */
	"Res 0x105 error",				/* 1_0101b */
	"Res 0x106 error",				/* 1_0110b */
	"Res 0x107 error",				/* 1_0111b */
	"Res 0x108 error",				/* 1_1000b */
	"Res 0x109 error",				/* 1_1001b */
	"Res 0x10A error",				/* 1_1010b */
	"Res 0x10B error",				/* 1_1011b */
	"ECC error in L3 Cache Data",			/* 1_1100b */
	"L3 Cache Tag error",				/* 1_1101b */
	"L3 Cache LRU Parity error",			/* 1_1110b */
	"Probe Filter error"				/* 1_1111b */
};
EXPORT_SYMBOL_GPL(ext_msgs);
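
/*
 * Indexing sketch (an assumption about mce_amd.h, not verified here): the
 * extended error code lives above the standard 16-bit error code in
 * MCi_STATUS, so a lookup along the lines of
 *
 *	xec = (m->status >> 16) & 0x1f;	(4 or 5 bits, family-specific)
 *	pr_cont("%s", ext_msgs[xec]);
 *
 * is what EXT_ERR_MSG(), used by the NB decoder below, is assumed to do.
 */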
static bool f10h_dc_mce(u16 ec)
{
	u8 r4 = (ec >> 4) & 0xf;
	bool ret = false;

	if (r4 == R4_GEN) {
		pr_cont("during data scrub.\n");
		return true;
	}

	if (MEM_ERROR(ec)) {
		u8 ll = ec & 0x3;
		ret = true;

		if (ll == LL_L2)
			pr_cont("during L1 linefill from L2.\n");
		else if (ll == LL_L1)
			pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
		else
			ret = false;
	}
	return ret;
}
static bool k8_dc_mce(u16 ec)
{
	if (BUS_ERROR(ec)) {
		pr_cont("during system linefill.\n");
		return true;
	}

	return f10h_dc_mce(ec);
}
static bool f14h_dc_mce(u16 ec)
{
	u8 r4 = (ec >> 4) & 0xf;
	u8 ll = ec & 0x3;
	u8 tt = (ec >> 2) & 0x3;
	/* for bus errors, II occupies the same bits TT does for mem errors */
	u8 ii = tt;
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (tt != TT_DATA || ll != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {
		if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}
static void amd_decode_dc_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & 0xf;

	pr_emerg(HW_ERR "Data Cache Error: ");

	/* TLB error signatures are the same across families */
	if (TLB_ERROR(ec)) {
		u8 tt = (ec >> 2) & 0x3;

		if (tt != TT_DATA)
			goto wrong_dc_mce;

		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
		return;
	}

	if (!fam_ops->dc_mce(ec))
		goto wrong_dc_mce;

	return;

wrong_dc_mce:
	pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
}
static bool k8_ic_mce(u16 ec)
{
	u8 ll = ec & 0x3;
	u8 r4 = (ec >> 4) & 0xf;
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	if (ll == LL_L2)
		pr_cont("during a linefill from L2.\n");
	else if (ll == LL_L1) {
		switch (r4) {
		case R4_IRD:
			pr_cont("Parity error during data load.\n");
			break;
		case R4_EVICT:
			pr_cont("Copyback Parity/Victim error.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag Snoop error.\n");
			break;
		default:
			ret = false;
			break;
		}
	} else
		ret = false;

	return ret;
}
static bool f14h_ic_mce(u16 ec)
{
	u8 ll = ec & 0x3;
	u8 tt = (ec >> 2) & 0x3;
	u8 r4 = (ec >> 4) & 0xf;
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (tt != 0 || ll != 1)
			ret = false;

		if (r4 == R4_IRD)
			pr_cont("Data/tag array parity error for a tag hit.\n");
		else if (r4 == R4_SNOOP)
			pr_cont("Tag error during snoop/victimization.\n");
		else
			ret = false;
	}
	return ret;
}
static void amd_decode_ic_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & 0xf;

	pr_emerg(HW_ERR "Instruction Cache Error: ");

	if (TLB_ERROR(ec))
		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
	else if (BUS_ERROR(ec)) {
		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT(58)));

		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
	} else if (!fam_ops->ic_mce(ec))
		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
}
static void amd_decode_bu_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & 0xf;

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				RRRR_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 rrrr = (ec >> 4) & 0xf;

			if (rrrr >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					RRRR_MSG(ec));
			else if (rrrr <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", RRRR_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}
static void amd_decode_ls_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & 0xf;

	pr_emerg(HW_ERR "Load Store Error");

	if (xec == 0x0) {
		u8 rrrr = (ec >> 4) & 0xf;

		if (!BUS_ERROR(ec) || (rrrr != R4_DRD && rrrr != R4_DWR))
			goto wrong_ls_mce;

		pr_cont(" during %s.\n", RRRR_MSG(ec));
	}
	return;

wrong_ls_mce:
	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
}
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
	u16 ec   = m->status & 0xffff;
	u32 nbsh = (u32)(m->status >> 32);
	u32 nbsl = (u32)m->status;

	/*
	 * GART TLB error reporting is disabled by default. Bail out early.
	 */
	if (TLB_ERROR(ec) && !report_gart_errors)
		return;

	pr_emerg(HW_ERR "Northbridge Error, node %d", node_id);

	/*
	 * F10h, revD can disable ErrCpu[3:0], so check that first; the value
	 * encoding has also changed, so interpret those bits differently.
	 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    (boot_cpu_data.x86_model > 7)) {
		if (nbsh & K8_NBSH_ERR_CPU_VAL)
			pr_cont(", core: %u", (u8)(nbsh & 0xf));
	} else {
		u8 assoc_cpus = nbsh & 0xf;

		if (assoc_cpus > 0)
			pr_cont(", core: %d", fls(assoc_cpus) - 1);
	}
	pr_cont("\n");

	pr_emerg(HW_ERR "%s.\n", EXT_ERR_MSG(nbsl));

	if (BUS_ERROR(ec) && nb_bus_decoder)
		nb_bus_decoder(node_id, m, nbcfg);
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
static void amd_decode_fr_mce(struct mce *m)
{
	/* we have only one error signature so match all fields at once. */
	if ((m->status & 0xffff) == 0x0f0f)
		pr_emerg(HW_ERR "FR Error: CPU Watchdog timer expire.\n");
	else
		pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}
static inline void amd_decode_err_code(u16 ec)
{
	if (TLB_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
			 TT_MSG(ec), LL_MSG(ec));
	} else if (MEM_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
	} else if (BUS_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
			 "Participating Processor: %s\n",
			 RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
			 PP_MSG(ec));
	} else
		pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
}
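
/*
 * Worked example (hypothetical value, assuming the usual MCA classing
 * macros from mce_amd.h): ec = 0x0145 classifies as a memory error with
 * ll = 0x1 (L1), tt = 0x1 (DATA) and rrrr = 0x4 (DWR), so the function
 * above would print:
 *
 *	Transaction: DWR, Type: DATA, Cache Level: L1
 */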
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	int node, ecc;

	pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);

	pr_cont("%sorrected error, other errors lost: %s, "
		"CPU context corrupt: %s",
		((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
		((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
		((m->status & MCI_STATUS_PCC) ? "yes" : "no"));

	/* do the two ECC bits (MCi_STATUS[46:45], CECC/UECC) together */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));

	pr_cont("\n");

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		node = amd_get_nb_id(m->extcpu);
		amd_decode_nb_mce(node, m, 0);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);
static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};

static int __init mce_amd_init(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;

	/*
	 * We can decode MCEs for K8, F10h and F14h CPUs:
	 */
	if (boot_cpu_data.x86 < 0xf ||
	    (boot_cpu_data.x86 > 0x10 && boot_cpu_data.x86 != 0x14))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (boot_cpu_data.x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		break;

	case 0x14:
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n",
		       boot_cpu_data.x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif