/* edac_mc.c */
  1. /*
  2. * edac_mc kernel module
  3. * (C) 2005, 2006 Linux Networx (http://lnxi.com)
  4. * This file may be distributed under the terms of the
  5. * GNU General Public License.
  6. *
  7. * Written by Thayne Harbaugh
  8. * Based on work by Dan Hollis <goemon at anime dot net> and others.
  9. * http://www.anime.net/~goemon/linux-ecc/
  10. *
  11. * Modified by Dave Peterson and Doug Thompson
  12. *
  13. */
  14. #include <linux/module.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/kernel.h>
  17. #include <linux/types.h>
  18. #include <linux/smp.h>
  19. #include <linux/init.h>
  20. #include <linux/sysctl.h>
  21. #include <linux/highmem.h>
  22. #include <linux/timer.h>
  23. #include <linux/slab.h>
  24. #include <linux/jiffies.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/list.h>
  27. #include <linux/sysdev.h>
  28. #include <linux/ctype.h>
  29. #include <linux/edac.h>
  30. #include <asm/uaccess.h>
  31. #include <asm/page.h>
  32. #include <asm/edac.h>
  33. #include "edac_core.h"
  34. #include "edac_module.h"
/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);

/* Global list of all registered memory controllers; kept sorted by
 * mc_idx (see add_mc_to_global_list()).  Readers may traverse under RCU;
 * writers hold mem_ctls_mutex.
 */
static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
  38. #ifdef CONFIG_EDAC_DEBUG
/* Dump every field of one channel_info to the debug log (debug builds only). */
static void edac_mc_dump_channel(struct channel_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
	debugf4("\tchannel->label = '%s'\n", chan->label);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}
/* Dump every field of one csrow_info to the debug log (debug builds only). */
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n",
		csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
	debugf4("\tcsrow->nr_channels = %d\n",
		csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}
/* Dump the top-level mem_ctl_info fields to the debug log (debug builds only). */
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tdev = %p\n", mci->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}
  75. #endif /* CONFIG_EDAC_DEBUG */
  76. /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  77. * Adjust 'ptr' so that its alignment is at least as stringent as what the
  78. * compiler would provide for X and return the aligned result.
  79. *
  80. * If 'size' is a constant, the compiler will optimize this whole function
  81. * down to either a no-op or the addition of a constant to the value of 'ptr'.
  82. */
  83. char * edac_align_ptr(void *ptr, unsigned size)
  84. {
  85. unsigned align, r;
  86. /* Here we assume that the alignment of a "long long" is the most
  87. * stringent alignment that the compiler will ever provide by default.
  88. * As far as I know, this is a reasonable assumption.
  89. */
  90. if (size > sizeof(long))
  91. align = sizeof(long long);
  92. else if (size > sizeof(int))
  93. align = sizeof(long);
  94. else if (size > sizeof(short))
  95. align = sizeof(int);
  96. else if (size > sizeof(char))
  97. align = sizeof(short);
  98. else
  99. return (char *) ptr;
  100. r = size % align;
  101. if (r == 0)
  102. return (char *) ptr;
  103. return (char *) (((unsigned long) ptr) + align - r);
  104. }
/**
 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
 * @sz_pvt: size of private storage needed
 * @nr_csrows: Number of CSROWS needed for this MC
 * @nr_chans: Number of channels for the MC
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * Only can be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * Returns:
 *	NULL	allocation failed
 *	struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
				unsigned nr_chans)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csi, *csrow;
	struct channel_info *chi, *chp, *chan;
	void *pvt;
	unsigned size;
	int row, chn;

	/* Figure out the offsets of the various items from the start of an mc
	 * structure. We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = (struct mem_ctl_info *) 0;	/* base 0: pointers below are really offsets */
	csi = (struct csrow_info *)edac_align_ptr(&mci[1], sizeof(*csi));
	chi = (struct channel_info *)
		edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
	/* total bytes = offset of private area + its size */
	size = ((unsigned long) pvt) + sz_pvt;

	if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
	chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
	pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;

	memset(mci, 0, size);	/* clear all fields */
	mci->csrows = csi;
	mci->pvt_info = pvt;
	mci->nr_csrows = nr_csrows;

	/* Wire each csrow to its slice of the channel array, with
	 * back-pointers so either object can reach the other.
	 */
	for (row = 0; row < nr_csrows; row++) {
		csrow = &csi[row];
		csrow->csrow_idx = row;
		csrow->mci = mci;
		csrow->nr_channels = nr_chans;
		chp = &chi[row * nr_chans];
		csrow->channels = chp;

		for (chn = 0; chn < nr_chans; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csrow;
		}
	}

	mci->op_state = OP_ALLOC;

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
/**
 * edac_mc_free: Free a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 *
 * The mci, its csrows, channels and private data were allocated as one
 * kmalloc chunk by edac_mc_alloc(), so a single kfree releases everything.
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	kfree(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
  179. static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
  180. {
  181. struct mem_ctl_info *mci;
  182. struct list_head *item;
  183. debugf3("%s()\n", __func__);
  184. list_for_each(item, &mc_devices) {
  185. mci = list_entry(item, struct mem_ctl_info, link);
  186. if (mci->dev == dev)
  187. return mci;
  188. }
  189. return NULL;
  190. }
  191. /*
  192. * handler for EDAC to check if NMI type handler has asserted interrupt
  193. */
  194. static int edac_mc_assert_error_check_and_clear(void)
  195. {
  196. int vreg;
  197. if(edac_op_state == EDAC_OPSTATE_POLL)
  198. return 1;
  199. vreg = atomic_read(&edac_err_assert);
  200. if(vreg) {
  201. atomic_set(&edac_err_assert, 0);
  202. return 1;
  203. }
  204. return 0;
  205. }
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 *
 * Runs one periodic check pass for the owning mci under mem_ctls_mutex,
 * then requeues itself so polling repeats every edac_mc_get_poll_msec().
 * The two prototypes exist because the workqueue callback signature
 * changed in kernel 2.6.20 (work_struct vs. opaque void * context).
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = (struct delayed_work *) work_req;
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
#else
static void edac_mc_workq_function(void *ptr)
{
	struct mem_ctl_info *mci = (struct mem_ctl_info *) ptr;
#endif
	mutex_lock(&mem_ctls_mutex);

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	/*
	 * FIXME: temp place holder for PCI checks,
	 * goes away when we break out PCI
	 */
	edac_pci_do_parity_check();

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work, edac_mc_get_poll_msec());
}
/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 * Queues the first run 'msec' milliseconds from now; after that the
 * work function (edac_mc_workq_function) requeues itself.
 */
void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("%s()\n", __func__);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
#else
	/* pre-2.6.20 work API passes the context pointer explicitly */
	INIT_WORK(&mci->work, edac_mc_workq_function, mci);
#endif
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
  248. /*
  249. * edac_mc_workq_teardown
  250. * stop the workq processing on this mci
  251. */
  252. void edac_mc_workq_teardown(struct mem_ctl_info *mci)
  253. {
  254. int status;
  255. status = cancel_delayed_work(&mci->work);
  256. if (status == 0) {
  257. /* workq instance might be running, wait for it */
  258. flush_workqueue(edac_workqueue);
  259. }
  260. }
/*
 * edac_reset_delay_period
 *	restart this mci's periodic poll with a new period of 'value' msec
 */
void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
{
	mutex_lock(&mem_ctls_mutex);

	/* cancel the current workq request */
	edac_mc_workq_teardown(mci);

	/* restart the workq request, with new delay value */
	edac_mc_workq_setup(mci, value);

	mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 * Inserts mci into the global list, which is kept sorted ascending by
 * mc_idx.  Fails if the same underlying device is already registered
 * (fail0) or if the mc_idx is already taken (fail1).  Callers in this
 * file hold mem_ctls_mutex across the call.
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	/* reject a second registration of the same device */
	if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL))
		goto fail0;

	/* find the first entry with a greater index; insert before it */
	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	/* RCU publish so lock-free readers see a consistent list */
	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
		dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		" duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
/* RCU callback: invoked after the grace period that follows
 * list_del_rcu(), i.e. once no reader can still reach mci through the
 * global list.  Re-inits the link and wakes the waiter in
 * del_mc_from_global_list().
 */
static void complete_mc_list_del(struct rcu_head *head)
{
	struct mem_ctl_info *mci;

	mci = container_of(head, struct mem_ctl_info, rcu);
	INIT_LIST_HEAD(&mci->link);
	complete(&mci->complete);
}
/* Unlink mci from the global list and block until an RCU grace period
 * has elapsed, so the caller may safely free the structure afterwards.
 */
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);
	init_completion(&mci->complete);
	call_rcu(&mci->rcu, complete_mc_list_del);
	wait_for_completion(&mci->complete);
}
  322. /**
  323. * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
  324. *
  325. * If found, return a pointer to the structure.
  326. * Else return NULL.
  327. *
  328. * Caller must hold mem_ctls_mutex.
  329. */
  330. struct mem_ctl_info * edac_mc_find(int idx)
  331. {
  332. struct list_head *item;
  333. struct mem_ctl_info *mci;
  334. list_for_each(item, &mc_devices) {
  335. mci = list_entry(item, struct mem_ctl_info, link);
  336. if (mci->mc_idx >= idx) {
  337. if (mci->mc_idx == idx)
  338. return mci;
  339. break;
  340. }
  341. }
  342. return NULL;
  343. }
  344. EXPORT_SYMBOL(edac_mc_find);
/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
{
	debugf0("%s()\n", __func__);

	mci->mc_idx = mc_idx;

#ifdef CONFIG_EDAC_DEBUG
	/* dump the mci (and, at higher verbosity, every csrow/channel) */
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(
					&mci->csrows[i].channels[j]);
		}
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
		mci->mod_name, mci->ctl_name, dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	/* sysfs creation failed: undo the list insertion above */
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 * Ownership of the returned mci passes to the caller (see edac_mc_free()).
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("MC: %s()\n", __func__);

	mutex_lock(&mem_ctls_mutex);

	if ((mci = find_mci_by_dev(dev)) == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	edac_remove_sysfs_mci_device(mci);
	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
/* Rewrite 'size' bytes at pfn 'page' + 'offset' via the architecture's
 * atomic_scrub() so a correctable ECC error is refreshed with good data.
 * Highmem pages are kmap'ed with interrupts disabled around the mapping.
 */
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("%s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
  455. /* FIXME - should return -1 */
  456. int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
  457. {
  458. struct csrow_info *csrows = mci->csrows;
  459. int row, i;
  460. debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
  461. row = -1;
  462. for (i = 0; i < mci->nr_csrows; i++) {
  463. struct csrow_info *csrow = &csrows[i];
  464. if (csrow->nr_pages == 0)
  465. continue;
  466. debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
  467. "mask(0x%lx)\n", mci->mc_idx, __func__,
  468. csrow->first_page, page, csrow->last_page,
  469. csrow->page_mask);
  470. if ((page >= csrow->first_page) &&
  471. (page <= csrow->last_page) &&
  472. ((page & csrow->page_mask) ==
  473. (csrow->first_page & csrow->page_mask))) {
  474. row = i;
  475. break;
  476. }
  477. }
  478. if (row == -1)
  479. edac_mc_printk(mci, KERN_ERR,
  480. "could not look up page error address %lx\n",
  481. (unsigned long) page);
  482. return row;
  483. }
  484. EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
/* FIXME - setable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
/* Record a correctable error at row/channel: optionally log it, bump the
 * mci-, csrow- and channel-level CE counters, and - if software source
 * scrubbing is enabled - rewrite the failing location.  Out-of-range
 * row/channel values are reported as internal errors and counted via
 * edac_mc_handle_ce_no_info().
 */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
	unsigned long page_frame_number, unsigned long offset_in_page,
	unsigned long syndrome, int row, int channel, const char *msg)
{
	unsigned long remapped_page;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range "
			"(%d >= %d)\n", channel,
			mci->csrows[row].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, syndrome, row, channel,
			mci->csrows[row].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[row].ce_count++;
	mci->csrows[row].channels[channel].ce_count++;

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some MC's can remap memory so that it is still available
		 * at a different address when PCI devices map into memory.
		 * MC's that can't do this lose the memory where PCI devices
		 * are mapped. This mapping is MC dependant and so we call
		 * back into the MC driver for it to map the MC page to
		 * a physical (CPU) page which can then be mapped to a virtual
		 * page - which can then be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page, offset_in_page,
				mci->csrows[row].grain);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
  540. void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
  541. {
  542. if (edac_get_log_ce())
  543. edac_mc_printk(mci, KERN_WARNING,
  544. "CE - no information available: %s\n", msg);
  545. mci->ce_noinfo_count++;
  546. mci->ce_count++;
  547. }
  548. EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
/* Record an uncorrectable error on 'row': build a ':'-joined string of all
 * the row's channel labels for the report, optionally log at KERN_EMERG,
 * panic if so configured, and bump the mci- and csrow-level UE counters.
 */
void edac_mc_handle_ue(struct mem_ctl_info *mci,
	unsigned long page_frame_number, unsigned long offset_in_page,
	int row, const char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;	/* room for several labels */
	char labels[len + 1];
	char *pos = labels;
	int chan;
	int chars;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	/* first label, then ":<label>" for each further channel while
	 * space remains in the buffer */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[row].channels[0].label);
	len -= chars;
	pos += chars;

	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
		chan++) {
		chars = snprintf(pos, len + 1, ":%s",
				mci->csrows[row].channels[chan].label);
		len -= chars;
		pos += chars;
	}

	if (edac_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
			"labels \"%s\": %s\n", page_frame_number,
			offset_in_page, mci->csrows[row].grain, row, labels,
			msg);

	if (edac_get_panic_on_ue())
		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
			"row %d, labels \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, row, labels, msg);

	mci->ue_count++;
	mci->csrows[row].ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
  594. void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
  595. {
  596. if (edac_get_panic_on_ue())
  597. panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
  598. if (edac_get_log_ue())
  599. edac_mc_printk(mci, KERN_WARNING,
  600. "UE - no information available: %s\n", msg);
  601. mci->ue_noinfo_count++;
  602. mci->ue_count++;
  603. }
  604. EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
/*************************************************************
 * On Fully Buffered DIMM modules, this help function is
 * called to process UE events
 *
 * Validates csrow and both channel indices, bumps the UE counters,
 * builds a "label-a-label-b" DIMM label string, then optionally logs
 * and/or panics depending on the module's log/panic settings.
 */
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
			unsigned int csrow,
			unsigned int channela,
			unsigned int channelb,
			char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;	/* room for both labels */
	char labels[len + 1];
	char *pos = labels;
	int chars;

	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channela >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-a out of range "
			"(%d >= %d)\n",
			channela, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channelb >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-b out of range "
			"(%d >= %d)\n",
			channelb, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;

	/* Generate the DIMM labels from the specified channels */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[csrow].channels[channela].label);
	len -= chars;
	pos += chars;
	chars = snprintf(pos, len + 1, "-%s",
			mci->csrows[csrow].channels[channelb].label);

	if (edac_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela, channelb,
			labels, msg);

	if (edac_get_panic_on_ue())
		panic("UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela,
			channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
/*************************************************************
 * On Fully Buffered DIMM modules, this help function is
 * called to process CE events
 *
 * Validates csrow and channel, optionally logs the event, then bumps
 * the mci-, csrow- and channel-level CE counters.
 */
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
			unsigned int csrow,
			unsigned int channel,
			char *msg)
{
	/* Ensure boundary values */
	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
			channel, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE row %d, channel %d, label \"%s\": %s\n",
			csrow, channel,
			mci->csrows[csrow].channels[channel].label,
			msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[channel].ce_count++;
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
  702. /*
  703. * Iterate over all MC instances and check for ECC, et al, errors
  704. */
  705. void edac_check_mc_devices(void)
  706. {
  707. struct list_head *item;
  708. struct mem_ctl_info *mci;
  709. debugf3("%s()\n", __func__);
  710. mutex_lock(&mem_ctls_mutex);
  711. list_for_each(item, &mc_devices) {
  712. mci = list_entry(item, struct mem_ctl_info, link);
  713. if (mci->edac_check != NULL)
  714. mci->edac_check(mci);
  715. }
  716. mutex_unlock(&mem_ctls_mutex);
  717. }