/*
 * Intel X38 Memory Controller kernel module
 * Copyright (C) 2008 Cluster Computing, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This file is based on i3200_edac.c
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>

#include "edac_core.h"

#define X38_REVISION		"1.1"

#define EDAC_MOD_STR		"x38_edac"

#define PCI_DEVICE_ID_INTEL_X38_HB	0x29e0

#define X38_RANKS		8
#define X38_RANKS_PER_CHANNEL	4
#define X38_CHANNELS		2

/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */

#define X38_MCHBAR_LOW		0x48	/* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH		0x4c
#define X38_MCHBAR_MASK		0xfffffc000ULL	/* bits 35:14 */
#define X38_MMR_WINDOW_SIZE	16384

#define X38_TOM	0xa0	/* Top of Memory (16b)
			 *
			 * 15:10 reserved
			 *  9:0  total populated physical memory
			 */
#define X38_TOM_MASK	0x3ff	/* bits 9:0 */
#define X38_TOM_SHIFT	26	/* 64MiB grain */
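
/*
 * Illustrative example (value not from the datasheet): with a 26-bit
 * shift one TOM/DRB unit is 1 << 26 bytes = 64 MiB, so a register
 * value of 0x40 (64 units) corresponds to 4 GiB of populated memory.
 */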

#define X38_ERRSTS	0xc8	/* Error Status Register (16b)
				 *
				 * 15    reserved
				 * 14    Isochronous TBWRR Run Behind FIFO Full
				 *       (ITCV)
				 * 13    Isochronous TBWRR Run Behind FIFO Put
				 *       (ITSTV)
				 * 12    reserved
				 * 11    MCH Thermal Sensor Event
				 *       for SMI/SCI/SERR (GTSE)
				 * 10    reserved
				 *  9    LOCK to non-DRAM Memory Flag (LCKF)
				 *  8    reserved
				 *  7    DRAM Throttle Flag (DTF)
				 *  6:2  reserved
				 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
				 *  0    Single-bit DRAM ECC Error Flag (DSERR)
				 */
#define X38_ERRSTS_UE		0x0002
#define X38_ERRSTS_CE		0x0001
#define X38_ERRSTS_BITS		(X38_ERRSTS_UE | X38_ERRSTS_CE)

/* Intel MMIO register space - device 0 function 0 - MMR space */

#define X38_C0DRB	0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
				 *
				 * 15:10 reserved
				 *  9:0  Channel 0 DRAM Rank Boundary Address
				 */
#define X38_C1DRB	0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK	0x3ff	/* bits 9:0 */
#define X38_DRB_SHIFT	26	/* 64MiB grain */

#define X38_C0ECCERRLOG	0x280	/* Channel 0 ECC Error Log (64b)
				 *
				 * 63:48 Error Column Address (ERRCOL)
				 * 47:32 Error Row Address (ERRROW)
				 * 31:29 Error Bank Address (ERRBANK)
				 * 28:27 Error Rank Address (ERRRANK)
				 * 26:24 reserved
				 * 23:16 Error Syndrome (ERRSYND)
				 * 15: 2 reserved
				 *  1    Multiple Bit Error Status (MERRSTS)
				 *  0    Correctable Error Status (CERRSTS)
				 */
#define X38_C1ECCERRLOG	0x680	/* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE	0x1
#define X38_ECCERRLOG_UE	0x2
#define X38_ECCERRLOG_RANK_BITS		0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS	0xff0000
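
/*
 * Worked example (illustrative value, not captured from hardware): a
 * log value of 0x08ab0001 decodes as rank 1 (bits 28:27 = 01b),
 * syndrome 0xab (bits 23:16) and CERRSTS set, i.e. a correctable
 * error on rank 1 of the channel the log register belongs to.
 */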

#define X38_CAPID0	0xe0	/* see P.94 of spec for details */

static int x38_channel_num;

static int how_many_channel(struct pci_dev *pdev)
{
	unsigned char capid0_8b; /* 8th byte of CAPID0 */

	pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
		edac_dbg(0, "In single channel mode\n");
		x38_channel_num = 1;
	} else {
		edac_dbg(0, "In dual channel mode\n");
		x38_channel_num = 2;
	}

	return x38_channel_num;
}

static unsigned long eccerrlog_syndrome(u64 log)
{
	return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}

static int eccerrlog_row(int channel, u64 log)
{
	return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
		(channel * X38_RANKS_PER_CHANNEL);
}
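
/*
 * Each channel exposes X38_RANKS_PER_CHANNEL (4) ranks and csrows are
 * numbered channel-major, so e.g. rank 2 on channel 1 maps to csrow
 * 1 * 4 + 2 = 6.  The OR works as addition here because the rank
 * field only occupies the low two bits.
 */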

enum x38_chips {
	X38 = 0,
};

struct x38_dev_info {
	const char *ctl_name;
};

struct x38_error_info {
	u16 errsts;
	u16 errsts2;
	u64 eccerrlog[X38_CHANNELS];
};

static const struct x38_dev_info x38_devs[] = {
	[X38] = {
		.ctl_name = "x38"},
};

static struct pci_dev *mci_pdev;
static int x38_registered = 1;

static void x38_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
			 X38_ERRSTS_BITS);
}

static u64 x38_readq(const void __iomem *addr)
{
	return readl(addr) | (((u64)readl(addr + 4)) << 32);
}
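
/*
 * Note: the two 32-bit reads above are not atomic, so a log register
 * may change between the low and high halves; the caller compensates
 * by reading ERRSTS twice and re-reading the logs if the status
 * changed in between.
 */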

static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
				 struct x38_error_info *info)
{
	struct pci_dev *pdev;
	void __iomem *window = mci->pvt_info;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
	if (x38_channel_num == 2)
		info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);

	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
		if (x38_channel_num == 2)
			info->eccerrlog[1] =
				x38_readq(window + X38_C1ECCERRLOG);
	}

	x38_clear_error_info(mci);
}

static void x38_process_error_info(struct mem_ctl_info *mci,
				struct x38_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1,
				     "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < x38_channel_num; channel++) {
		log = info->eccerrlog[channel];
		if (log & X38_ECCERRLOG_UE) {
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     0, 0, 0,
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "x38 UE", "");
		} else if (log & X38_ECCERRLOG_CE) {
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     0, 0, eccerrlog_syndrome(log),
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "x38 CE", "");
		}
	}
}

static void x38_check(struct mem_ctl_info *mci)
{
	struct x38_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	x38_get_and_clear_error_info(mci, &info);
	x38_process_error_info(mci, &info);
}

static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
	pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
	pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= X38_MCHBAR_MASK;

	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
			"x38: mmio space beyond accessible range (0x%llx)\n",
			(unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
			(unsigned long long)u.mchbar);

	return window;
}
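
/*
 * Note: the low bit written back above is the MCHBAR enable bit; the
 * cast check rejects a BAR that does not fit in resource_size_t
 * (e.g. a 36-bit address on a 32-bit kernel without 64-bit
 * phys_addr_t), since such a window could not be ioremap()ed.
 */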

static void x38_get_drbs(void __iomem *window,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
		drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
		drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
	}
}

static bool x38_is_stacked(struct pci_dev *pdev,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, X38_TOM, &tom);
	tom &= X38_TOM_MASK;

	return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}
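
/*
 * In stacked mode channel 1 is mapped above channel 0 rather than
 * interleaved with it, so the last channel 1 rank boundary equals the
 * total populated memory (TOM); in interleaved mode each channel's
 * DRBs only cover that channel, so the last boundary sits below TOM.
 */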

static unsigned long drb_to_nr_pages(
	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
	bool stacked, int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (rank > 0)
		n -= drbs[channel][rank - 1];
	if (stacked && (channel == 1) && drbs[channel][rank] ==
			drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
		n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
	}

	n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
	return n;
}
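
/*
 * Worked example with 4 KiB pages (PAGE_SHIFT = 12): a DRB delta of
 * 0x10 (16 * 64 MiB = 1 GiB) yields 0x10 << (26 - 12) = 262144 pages,
 * which is exactly 1 GiB / 4 KiB.
 */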

static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;

	edac_dbg(0, "MC:\n");

	window = x38_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	x38_get_drbs(window, drbs);

	how_many_channel(pdev);

	/* FIXME: unconventional pvt_info usage */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = X38_RANKS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = x38_channel_num;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = X38_REVISION;
	mci->ctl_name = x38_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = x38_check;
	mci->ctl_page_to_phys = NULL;
	mci->pvt_info = window;

	stacked = x38_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
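	/*
	 * Illustrative example: channel 0 DRBs of {0x10, 0x20, 0x20, 0x20}
	 * describe a 1 GiB rank 0, a 1 GiB rank 1 and two unpopulated
	 * ranks, for 2 GiB total on the channel.
	 */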
	for (i = 0; i < mci->nr_csrows; i++) {
		unsigned long nr_pages;
		struct csrow_info *csrow = mci->csrows[i];

		nr_pages = drb_to_nr_pages(drbs, stacked,
			i / X38_RANKS_PER_CHANNEL,
			i % X38_RANKS_PER_CHANNEL);

		if (nr_pages == 0)
			continue;

		for (j = 0; j < x38_channel_num; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / x38_channel_num;
			dimm->grain = nr_pages << PAGE_SHIFT;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	x38_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}

static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = x38_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void x38_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	iounmap(mci->pvt_info);

	edac_mc_free(mci);
}

static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
	{
	 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 X38},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, x38_pci_tbl);

static struct pci_driver x38_driver = {
	.name = EDAC_MOD_STR,
	.probe = x38_init_one,
	.remove = x38_remove_one,
	.id_table = x38_pci_tbl,
};

static int __init x38_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&x38_driver);
	if (pci_rc < 0)
		goto fail0;
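
	/*
	 * If the PCI core did not bind us to the host bridge (probe was
	 * never called, so mci_pdev is still NULL), look the device up
	 * by hand and probe it directly; x38_registered records which
	 * path was taken so x38_exit() can undo the manual probe.
	 */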
	if (!mci_pdev) {
		x38_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_X38_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "x38 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "x38 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&x38_driver);

fail0:
	if (mci_pdev)
		pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit x38_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&x38_driver);
	if (!x38_registered) {
		x38_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(x38_init);
module_exit(x38_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");