edac_mc.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301
  1. /*
  2. * edac_mc kernel module
  3. * (C) 2005, 2006 Linux Networx (http://lnxi.com)
  4. * This file may be distributed under the terms of the
  5. * GNU General Public License.
  6. *
  7. * Written by Thayne Harbaugh
  8. * Based on work by Dan Hollis <goemon at anime dot net> and others.
  9. * http://www.anime.net/~goemon/linux-ecc/
  10. *
  11. * Modified by Dave Peterson and Doug Thompson
  12. *
  13. */
  14. #include <linux/module.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/kernel.h>
  17. #include <linux/types.h>
  18. #include <linux/smp.h>
  19. #include <linux/init.h>
  20. #include <linux/sysctl.h>
  21. #include <linux/highmem.h>
  22. #include <linux/timer.h>
  23. #include <linux/slab.h>
  24. #include <linux/jiffies.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/list.h>
  27. #include <linux/ctype.h>
  28. #include <linux/edac.h>
  29. #include <linux/bitops.h>
  30. #include <asm/uaccess.h>
  31. #include <asm/page.h>
  32. #include <asm/edac.h>
  33. #include "edac_core.h"
  34. #include "edac_module.h"
  35. #define CREATE_TRACE_POINTS
  36. #define TRACE_INCLUDE_PATH ../../include/ras
  37. #include <ras/ras_event.h>
  38. /* lock to memory controller's control array */
  39. static DEFINE_MUTEX(mem_ctls_mutex);
  40. static LIST_HEAD(mc_devices);
  41. /*
  42. * Used to lock EDAC MC to just one module, avoiding two drivers e. g.
  43. * apei/ghes and i7core_edac to be used at the same time.
  44. */
  45. static void const *edac_mc_owner;
  46. static struct bus_type mc_bus[EDAC_MAX_MCS];
  47. unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
  48. unsigned len)
  49. {
  50. struct mem_ctl_info *mci = dimm->mci;
  51. int i, n, count = 0;
  52. char *p = buf;
  53. for (i = 0; i < mci->n_layers; i++) {
  54. n = snprintf(p, len, "%s %d ",
  55. edac_layer_name[mci->layers[i].type],
  56. dimm->location[i]);
  57. p += n;
  58. len -= n;
  59. count += n;
  60. if (!len)
  61. break;
  62. }
  63. return count;
  64. }
  65. #ifdef CONFIG_EDAC_DEBUG
/* Dump the old-API view of one rank/channel (debug level 4) */
static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, " channel = %p\n", chan);
	edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
  73. static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
  74. {
  75. char location[80];
  76. edac_dimm_info_location(dimm, location, sizeof(location));
  77. edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
  78. dimm->mci->csbased ? "rank" : "dimm",
  79. number, location, dimm->csrow, dimm->cschannel);
  80. edac_dbg(4, " dimm = %p\n", dimm);
  81. edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
  82. edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
  83. edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
  84. edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
  85. }
/* Dump one chip-select row's geometry and pointers (debug level 4) */
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, " csrow = %p\n", csrow);
	edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}
/* Dump the controller-wide state of @mci (debug levels 3 and 4) */
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}
  113. #endif /* CONFIG_EDAC_DEBUG */
/*
 * keep those in sync with the enum mem_type
 */
/* Human-readable memory-type names, indexed by enum mem_type value */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
  137. /**
  138. * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
  139. * @p: pointer to a pointer with the memory offset to be used. At
  140. * return, this will be incremented to point to the next offset
  141. * @size: Size of the data structure to be reserved
  142. * @n_elems: Number of elements that should be reserved
  143. *
  144. * If 'size' is a constant, the compiler will optimize this whole function
  145. * down to either a no-op or the addition of a constant to the value of '*p'.
  146. *
  147. * The 'p' pointer is absolutely needed to keep the proper advancing
  148. * further in memory to the proper offsets when allocating the struct along
  149. * with its embedded structs, as edac_device_alloc_ctl_info() does it
  150. * above, for example.
  151. *
  152. * At return, the pointer 'p' will be incremented to be used on a next call
  153. * to this function.
  154. */
  155. void *edac_align_ptr(void **p, unsigned size, int n_elems)
  156. {
  157. unsigned align, r;
  158. void *ptr = *p;
  159. *p += size * n_elems;
  160. /*
  161. * 'p' can possibly be an unaligned item X such that sizeof(X) is
  162. * 'size'. Adjust 'p' so that its alignment is at least as
  163. * stringent as what the compiler would provide for X and return
  164. * the aligned result.
  165. * Here we assume that the alignment of a "long long" is the most
  166. * stringent alignment that the compiler will ever provide by default.
  167. * As far as I know, this is a reasonable assumption.
  168. */
  169. if (size > sizeof(long))
  170. align = sizeof(long long);
  171. else if (size > sizeof(int))
  172. align = sizeof(long);
  173. else if (size > sizeof(short))
  174. align = sizeof(int);
  175. else if (size > sizeof(char))
  176. align = sizeof(short);
  177. else
  178. return (char *)ptr;
  179. r = (unsigned long)p % align;
  180. if (r == 0)
  181. return (char *)ptr;
  182. *p += align - r;
  183. return (void *)(((unsigned long)ptr) + align - r);
  184. }
  185. static void _edac_mc_free(struct mem_ctl_info *mci)
  186. {
  187. int i, chn, row;
  188. struct csrow_info *csr;
  189. const unsigned int tot_dimms = mci->tot_dimms;
  190. const unsigned int tot_channels = mci->num_cschannel;
  191. const unsigned int tot_csrows = mci->nr_csrows;
  192. if (mci->dimms) {
  193. for (i = 0; i < tot_dimms; i++)
  194. kfree(mci->dimms[i]);
  195. kfree(mci->dimms);
  196. }
  197. if (mci->csrows) {
  198. for (row = 0; row < tot_csrows; row++) {
  199. csr = mci->csrows[row];
  200. if (csr) {
  201. if (csr->channels) {
  202. for (chn = 0; chn < tot_channels; chn++)
  203. kfree(csr->channels[chn]);
  204. kfree(csr->channels);
  205. }
  206. kfree(csr);
  207. }
  208. }
  209. kfree(mci->csrows);
  210. }
  211. kfree(mci);
  212. }
/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num: Memory controller number
 * @n_layers: Number of MC hierarchy layers
 * layers: Describes each layer as seen by the Memory Controller
 * @size_pvt: size of private storage needed
 *
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * Only can be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function will allocate multiple struct dimm_info
 * on such scenarios, as grouping the multiple ranks require drivers change.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;
		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure. We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 *
	 * First pass: run edac_align_ptr() against a NULL base so each call
	 * yields the item's OFFSET from the start of the (future) chunk.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		/* per-layer CE/UE counter arrays, one entry per location */
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}
	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	/* 'pvt' is the last offset, so offset + its size = total size */
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	/*
	 * Alocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			goto error;
		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		goto error;
	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	/* walk every layer position, creating one dimm_info per location */
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}
		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;
		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];
			if (len <= 0)
				break;
		}
		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;
		/* Increment csrow location */
		if (layers[0].is_virt_csrow) {
			chn++;
			if (chn == tot_channels) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == tot_csrows) {
				row = 0;
				chn++;
			}
		}
		/* Increment dimm location (odometer-style, last layer fastest) */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}
	mci->op_state = OP_ALLOC;
	return mci;

error:
	/* safe on a partially built mci: every pointer is checked there */
	_edac_mc_free(mci);
	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
  411. /**
  412. * edac_mc_free
  413. * 'Free' a previously allocated 'mci' structure
  414. * @mci: pointer to a struct mem_ctl_info structure
  415. */
  416. void edac_mc_free(struct mem_ctl_info *mci)
  417. {
  418. edac_dbg(1, "\n");
  419. /* If we're not yet registered with sysfs free only what was allocated
  420. * in edac_mc_alloc().
  421. */
  422. if (!device_is_registered(&mci->dev)) {
  423. _edac_mc_free(mci);
  424. return;
  425. }
  426. /* the mci instance is freed here, when the sysfs object is dropped */
  427. edac_unregister_sysfs(mci);
  428. }
  429. EXPORT_SYMBOL_GPL(edac_mc_free);
  430. /**
  431. * find_mci_by_dev
  432. *
  433. * scan list of controllers looking for the one that manages
  434. * the 'dev' device
  435. * @dev: pointer to a struct device related with the MCI
  436. */
  437. struct mem_ctl_info *find_mci_by_dev(struct device *dev)
  438. {
  439. struct mem_ctl_info *mci;
  440. struct list_head *item;
  441. edac_dbg(3, "\n");
  442. list_for_each(item, &mc_devices) {
  443. mci = list_entry(item, struct mem_ctl_info, link);
  444. if (mci->pdev == dev)
  445. return mci;
  446. }
  447. return NULL;
  448. }
  449. EXPORT_SYMBOL_GPL(find_mci_by_dev);
  450. /*
  451. * handler for EDAC to check if NMI type handler has asserted interrupt
  452. */
  453. static int edac_mc_assert_error_check_and_clear(void)
  454. {
  455. int old_state;
  456. if (edac_op_state == EDAC_OPSTATE_POLL)
  457. return 1;
  458. old_state = edac_err_assert;
  459. edac_err_assert = 0;
  460. return old_state;
  461. }
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule (outside the mutex) for the next poll interval */
	queue_delayed_work(edac_workqueue, &mci->work,
			   msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	edac_dbg(0, "\n");

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	/* (re)arm the poller; mod_delayed_work() replaces any pending timer */
	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
  502. /*
  503. * edac_mc_workq_teardown
  504. * stop the workq processing on this mci
  505. *
  506. * locking model:
  507. *
  508. * called WITHOUT lock held
  509. */
  510. static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
  511. {
  512. int status;
  513. if (mci->op_state != OP_RUNNING_POLL)
  514. return;
  515. status = cancel_delayed_work(&mci->work);
  516. if (status == 0) {
  517. edac_dbg(0, "not canceled, flush the queue\n");
  518. /* workq instance might be running, wait for it */
  519. flush_workqueue(edac_workqueue);
  520. }
  521. }
  522. /*
  523. * edac_mc_reset_delay_period(unsigned long value)
  524. *
  525. * user space has updated our poll period value, need to
  526. * reset our workq delays
  527. */
  528. void edac_mc_reset_delay_period(int value)
  529. {
  530. struct mem_ctl_info *mci;
  531. struct list_head *item;
  532. mutex_lock(&mem_ctls_mutex);
  533. list_for_each(item, &mc_devices) {
  534. mci = list_entry(item, struct mem_ctl_info, link);
  535. edac_mc_workq_setup(mci, (unsigned long) value);
  536. }
  537. mutex_unlock(&mem_ctls_mutex);
  538. }
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	/* refuse to register the same underlying device twice */
	p = find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	/* keep mc_devices sorted by mc_idx; reject a duplicate index */
	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	/* RCU insert: readers may traverse the list without the mutex */
	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		" duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
  578. static int del_mc_from_global_list(struct mem_ctl_info *mci)
  579. {
  580. int handlers = atomic_dec_return(&edac_handlers);
  581. list_del_rcu(&mci->link);
  582. /* these are for safe removal of devices from global list while
  583. * NMI handlers may be traversing list
  584. */
  585. synchronize_rcu();
  586. INIT_LIST_HEAD(&mci->link);
  587. return handlers;
  588. }
  589. /**
  590. * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
  591. *
  592. * If found, return a pointer to the structure.
  593. * Else return NULL.
  594. *
  595. * Caller must hold mem_ctls_mutex.
  596. */
  597. struct mem_ctl_info *edac_mc_find(int idx)
  598. {
  599. struct list_head *item;
  600. struct mem_ctl_info *mci;
  601. list_for_each(item, &mc_devices) {
  602. mci = list_entry(item, struct mem_ctl_info, link);
  603. if (mci->mc_idx >= idx) {
  604. if (mci->mc_idx == idx)
  605. return mci;
  606. break;
  607. }
  608. }
  609. return NULL;
  610. }
  611. EXPORT_SYMBOL(edac_mc_find);
/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	int ret = -EINVAL;
	edac_dbg(0, "\n");

	if (mci->mc_idx >= EDAC_MAX_MCS) {
		pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
		return -ENODEV;
	}

#ifdef CONFIG_EDAC_DEBUG
	/* dump the controller layout when verbose debugging is enabled */
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			/* only dump rows/channels that actually have memory */
			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			if (mci->dimms[i]->nr_pages)
				edac_mc_dump_dimm(mci->dimms[i], i);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	/* only one EDAC module may own registration at a time */
	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = &mc_bus[mci->mc_idx];

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @pdev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* last controller gone: let another module become the owner */
	if (!del_mc_from_global_list(mci))
		edac_mc_owner = NULL;
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes (must be outside mem_ctls_mutex — the
	 * work function itself takes that mutex)
	 */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
/*
 * edac_mc_scrub_block - rewrite a memory block so the hardware
 * recomputes/corrects its ECC
 * @page:	PFN of the page containing the error
 * @offset:	byte offset of the error within that page
 * @size:	number of bytes to scrub
 */
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	/* highmem atomic mapping must not be interrupted mid-scrub */
	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
  744. /* FIXME - should return -1 */
  745. int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
  746. {
  747. struct csrow_info **csrows = mci->csrows;
  748. int row, i, j, n;
  749. edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
  750. row = -1;
  751. for (i = 0; i < mci->nr_csrows; i++) {
  752. struct csrow_info *csrow = csrows[i];
  753. n = 0;
  754. for (j = 0; j < csrow->nr_channels; j++) {
  755. struct dimm_info *dimm = csrow->channels[j]->dimm;
  756. n += dimm->nr_pages;
  757. }
  758. if (n == 0)
  759. continue;
  760. edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
  761. mci->mc_idx,
  762. csrow->first_page, page, csrow->last_page,
  763. csrow->page_mask);
  764. if ((page >= csrow->first_page) &&
  765. (page <= csrow->last_page) &&
  766. ((page & csrow->page_mask) ==
  767. (csrow->first_page & csrow->page_mask))) {
  768. row = i;
  769. break;
  770. }
  771. }
  772. if (row == -1)
  773. edac_mc_printk(mci, KERN_ERR,
  774. "could not look up page error address %lx\n",
  775. (unsigned long)page);
  776. return row;
  777. }
  778. EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
/* Human-readable layer names, indexed by enum edac_mc_layer_type */
const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
/*
 * edac_inc_ce_error - bump the corrected-error counters on @mci
 * @enable_per_layer_report: when false, only the "no info" total is kept
 * @pos: error position per layer; a negative entry means the location is
 *	 unknown from that layer downward
 * @count: number of errors to account
 */
static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ce_mc += count;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		/* fold this layer's position into a row-major index */
		index += pos[i];
		mci->ce_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}
  807. static void edac_inc_ue_error(struct mem_ctl_info *mci,
  808. bool enable_per_layer_report,
  809. const int pos[EDAC_MAX_LAYERS],
  810. const u16 count)
  811. {
  812. int i, index = 0;
  813. mci->ue_mc += count;
  814. if (!enable_per_layer_report) {
  815. mci->ce_noinfo_count += count;
  816. return;
  817. }
  818. for (i = 0; i < mci->n_layers; i++) {
  819. if (pos[i] < 0)
  820. break;
  821. index += pos[i];
  822. mci->ue_per_layer[i][index] += count;
  823. if (i < mci->n_layers - 1)
  824. index *= mci->layers[i + 1].size;
  825. }
  826. }
/*
 * edac_ce_error - log a corrected error and update the CE bookkeeping
 *
 * Optionally prints a warning (when CE logging is enabled), increments the
 * CE counters via edac_inc_ce_error(), and, if software source scrubbing is
 * selected, rewrites the failing location so the corrected data is written
 * back to memory.
 */
static void edac_ce_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;
	char *msg_aux = "";

	/* Only insert a separator after @msg when it is non-empty */
	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MC's that can't do this, lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}
  876. static void edac_ue_error(struct mem_ctl_info *mci,
  877. const u16 error_count,
  878. const int pos[EDAC_MAX_LAYERS],
  879. const char *msg,
  880. const char *location,
  881. const char *label,
  882. const char *detail,
  883. const char *other_detail,
  884. const bool enable_per_layer_report)
  885. {
  886. char *msg_aux = "";
  887. if (*msg)
  888. msg_aux = " ";
  889. if (edac_mc_get_log_ue()) {
  890. if (other_detail && *other_detail)
  891. edac_mc_printk(mci, KERN_WARNING,
  892. "%d UE %s%son %s (%s %s - %s)\n",
  893. error_count, msg, msg_aux, label,
  894. location, detail, other_detail);
  895. else
  896. edac_mc_printk(mci, KERN_WARNING,
  897. "%d UE %s%son %s (%s %s)\n",
  898. error_count, msg, msg_aux, label,
  899. location, detail);
  900. }
  901. if (edac_mc_get_panic_on_ue()) {
  902. if (other_detail && *other_detail)
  903. panic("UE %s%son %s (%s%s - %s)\n",
  904. msg, msg_aux, label, location, detail, other_detail);
  905. else
  906. panic("UE %s%son %s (%s%s)\n",
  907. msg, msg_aux, label, location, detail);
  908. }
  909. edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
  910. }
  911. /**
  912. * edac_raw_mc_handle_error - reports a memory event to userspace without doing
  913. * anything to discover the error location
  914. *
  915. * @type: severity of the error (CE/UE/Fatal)
  916. * @mci: a struct mem_ctl_info pointer
  917. * @e: error description
  918. *
  919. * This raw function is used internally by edac_mc_handle_error(). It should
  920. * only be called directly when the hardware error come directly from BIOS,
  921. * like in the case of APEI GHES driver.
  922. */
  923. void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
  924. struct mem_ctl_info *mci,
  925. struct edac_raw_error_desc *e)
  926. {
  927. char detail[80];
  928. int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
  929. /* Memory type dependent details about the error */
  930. if (type == HW_EVENT_ERR_CORRECTED) {
  931. snprintf(detail, sizeof(detail),
  932. "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
  933. e->page_frame_number, e->offset_in_page,
  934. e->grain, e->syndrome);
  935. edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
  936. detail, e->other_detail, e->enable_per_layer_report,
  937. e->page_frame_number, e->offset_in_page, e->grain);
  938. } else {
  939. snprintf(detail, sizeof(detail),
  940. "page:0x%lx offset:0x%lx grain:%ld",
  941. e->page_frame_number, e->offset_in_page, e->grain);
  942. edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
  943. detail, e->other_detail, e->enable_per_layer_report);
  944. }
  945. }
  946. EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type: severity of the error (CE/UE/Fatal)
 * @mci: a struct mem_ctl_info pointer
 * @error_count: Number of errors of the same type
 * @page_frame_number: mem page where the error occurred
 * @offset_in_page: offset of the error inside the page
 * @syndrome: ECC syndrome
 * @top_layer: Memory layer[0] position
 * @mid_layer: Memory layer[1] position
 * @low_layer: Memory layer[2] position
 * @msg: Message meaningful to the end users that
 *	 explains the event
 * @other_detail: Technical details about the event that
 *		  may help hardware manufacturers and
 *		  EDAC developers to analyse the event
 *
 * Validates the reported location, resolves the affected DIMM label(s) and
 * grain, updates the legacy csrow/channel counters, emits a trace event and
 * finally hands off to edac_raw_mc_handle_error() for logging/accounting.
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	char *p;
	int row = -1, chan = -1;	/* -1: unset, -2: ambiguous (multiple matches) */
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	u8 grain_bits;
	struct edac_raw_error_desc *e = &mci->error_desc;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof (*e));
	e->error_count = error_count;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	e->msg = msg;
	e->other_detail = other_detail;

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			e->enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * pottentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		/* Skip DIMMs outside the reported location on any known layer */
		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (e->enable_per_layer_report && dimm->nr_pages) {
			/* Too many candidates: fall back to "any memory" */
			if (n_labels >= EDAC_MAX_LABELS) {
				e->enable_per_layer_report = false;
				break;
			}
			n_labels++;
			/* Join multiple labels with OTHER_LABEL separator */
			if (p != e->label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			edac_dbg(4, "%s csrows map: (%d,%d)\n",
				 mci->csbased ? "rank" : "dimm",
				 dimm->csrow, dimm->cschannel);
			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!e->enable_per_layer_report) {
		strcpy(e->label, "any memory");
	} else {
		edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
		if (p == e->label)
			strcpy(e->label, "unknown memory");
		/* Legacy per-csrow/per-channel counters (compat API) */
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count += error_count;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count += error_count;
			}
		} else
			if (row >= 0)
				mci->csrows[row]->ue_count += error_count;
	}

	/* Fill the RAM location data */
	p = e->location;

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	/* Drop the trailing space left by the last sprintf above */
	if (p > e->location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */
	/*
	 * NOTE(review): for a power-of-two grain g this computes
	 * log2(g) + 2 (e.g. grain 8 -> grain_bits 5, i.e. 32 bytes),
	 * apparently overstating the grain; later upstream kernels use
	 * fls_long(e->grain - 1) instead — confirm before relying on
	 * the traced value.
	 */
	grain_bits = fls_long(e->grain) + 1;
	trace_mc_event(type, e->msg, e->label, e->error_count,
		       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
		       PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
		       grain_bits, e->syndrome, e->other_detail);

	edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);