/*
 * Freescale MPC85xx Memory Controller kernel module
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>

#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"
  26. static int edac_dev_idx;
  27. #ifdef CONFIG_PCI
  28. static int edac_pci_idx;
  29. #endif
  30. static int edac_mc_idx;
  31. static u32 orig_ddr_err_disable;
  32. static u32 orig_ddr_err_sbe;
  33. /*
  34. * PCI Err defines
  35. */
  36. #ifdef CONFIG_PCI
  37. static u32 orig_pci_err_cap_dr;
  38. static u32 orig_pci_err_en;
  39. #endif
  40. static u32 orig_l2_err_disable;
  41. #ifdef CONFIG_FSL_SOC_BOOKE
  42. static u32 orig_hid1[2];
  43. #endif
  44. /************************ MC SYSFS parts ***********************************/
  45. #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  46. static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
  47. struct device_attribute *mattr,
  48. char *data)
  49. {
  50. struct mem_ctl_info *mci = to_mci(dev);
  51. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  52. return sprintf(data, "0x%08x",
  53. in_be32(pdata->mc_vbase +
  54. MPC85XX_MC_DATA_ERR_INJECT_HI));
  55. }
  56. static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
  57. struct device_attribute *mattr,
  58. char *data)
  59. {
  60. struct mem_ctl_info *mci = to_mci(dev);
  61. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  62. return sprintf(data, "0x%08x",
  63. in_be32(pdata->mc_vbase +
  64. MPC85XX_MC_DATA_ERR_INJECT_LO));
  65. }
  66. static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
  67. struct device_attribute *mattr,
  68. char *data)
  69. {
  70. struct mem_ctl_info *mci = to_mci(dev);
  71. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  72. return sprintf(data, "0x%08x",
  73. in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
  74. }
  75. static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
  76. struct device_attribute *mattr,
  77. const char *data, size_t count)
  78. {
  79. struct mem_ctl_info *mci = to_mci(dev);
  80. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  81. if (isdigit(*data)) {
  82. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
  83. simple_strtoul(data, NULL, 0));
  84. return count;
  85. }
  86. return 0;
  87. }
  88. static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
  89. struct device_attribute *mattr,
  90. const char *data, size_t count)
  91. {
  92. struct mem_ctl_info *mci = to_mci(dev);
  93. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  94. if (isdigit(*data)) {
  95. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
  96. simple_strtoul(data, NULL, 0));
  97. return count;
  98. }
  99. return 0;
  100. }
  101. static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
  102. struct device_attribute *mattr,
  103. const char *data, size_t count)
  104. {
  105. struct mem_ctl_info *mci = to_mci(dev);
  106. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  107. if (isdigit(*data)) {
  108. out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
  109. simple_strtoul(data, NULL, 0));
  110. return count;
  111. }
  112. return 0;
  113. }
  114. DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
  115. mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
  116. DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
  117. mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
  118. DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
  119. mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
  120. static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
  121. {
  122. int rc;
  123. rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
  124. if (rc < 0)
  125. return rc;
  126. rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
  127. if (rc < 0)
  128. return rc;
  129. rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
  130. if (rc < 0)
  131. return rc;
  132. return 0;
  133. }
  134. static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
  135. {
  136. device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
  137. device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
  138. device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
  139. }
  140. /**************************** PCI Err device ***************************/
  141. #ifdef CONFIG_PCI
  142. static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
  143. {
  144. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  145. u32 err_detect;
  146. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  147. /* master aborts can happen during PCI config cycles */
  148. if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
  149. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
  150. return;
  151. }
  152. printk(KERN_ERR "PCI error(s) detected\n");
  153. printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
  154. printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
  155. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
  156. printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
  157. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
  158. printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
  159. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
  160. printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
  161. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
  162. printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
  163. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
  164. /* clear error bits */
  165. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
  166. if (err_detect & PCI_EDE_PERR_MASK)
  167. edac_pci_handle_pe(pci, pci->ctl_name);
  168. if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
  169. edac_pci_handle_npe(pci, pci->ctl_name);
  170. }
  171. static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
  172. {
  173. struct edac_pci_ctl_info *pci = dev_id;
  174. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  175. u32 err_detect;
  176. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  177. if (!err_detect)
  178. return IRQ_NONE;
  179. mpc85xx_pci_check(pci);
  180. return IRQ_HANDLED;
  181. }
  182. int mpc85xx_pci_err_probe(struct platform_device *op)
  183. {
  184. struct edac_pci_ctl_info *pci;
  185. struct mpc85xx_pci_pdata *pdata;
  186. struct resource r;
  187. int res = 0;
  188. if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
  189. return -ENOMEM;
  190. pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
  191. if (!pci)
  192. return -ENOMEM;
  193. /* make sure error reporting method is sane */
  194. switch (edac_op_state) {
  195. case EDAC_OPSTATE_POLL:
  196. case EDAC_OPSTATE_INT:
  197. break;
  198. default:
  199. edac_op_state = EDAC_OPSTATE_INT;
  200. break;
  201. }
  202. pdata = pci->pvt_info;
  203. pdata->name = "mpc85xx_pci_err";
  204. pdata->irq = NO_IRQ;
  205. dev_set_drvdata(&op->dev, pci);
  206. pci->dev = &op->dev;
  207. pci->mod_name = EDAC_MOD_STR;
  208. pci->ctl_name = pdata->name;
  209. pci->dev_name = dev_name(&op->dev);
  210. if (edac_op_state == EDAC_OPSTATE_POLL)
  211. pci->edac_check = mpc85xx_pci_check;
  212. pdata->edac_idx = edac_pci_idx++;
  213. res = of_address_to_resource(op->dev.of_node, 0, &r);
  214. if (res) {
  215. printk(KERN_ERR "%s: Unable to get resource for "
  216. "PCI err regs\n", __func__);
  217. goto err;
  218. }
  219. /* we only need the error registers */
  220. r.start += 0xe00;
  221. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  222. pdata->name)) {
  223. printk(KERN_ERR "%s: Error while requesting mem region\n",
  224. __func__);
  225. res = -EBUSY;
  226. goto err;
  227. }
  228. pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  229. if (!pdata->pci_vbase) {
  230. printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
  231. res = -ENOMEM;
  232. goto err;
  233. }
  234. orig_pci_err_cap_dr =
  235. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
  236. /* PCI master abort is expected during config cycles */
  237. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
  238. orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
  239. /* disable master abort reporting */
  240. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
  241. /* clear error bits */
  242. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
  243. if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
  244. edac_dbg(3, "failed edac_pci_add_device()\n");
  245. goto err;
  246. }
  247. if (edac_op_state == EDAC_OPSTATE_INT) {
  248. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  249. res = devm_request_irq(&op->dev, pdata->irq,
  250. mpc85xx_pci_isr, IRQF_DISABLED,
  251. "[EDAC] PCI err", pci);
  252. if (res < 0) {
  253. printk(KERN_ERR
  254. "%s: Unable to request irq %d for "
  255. "MPC85xx PCI err\n", __func__, pdata->irq);
  256. irq_dispose_mapping(pdata->irq);
  257. res = -ENODEV;
  258. goto err2;
  259. }
  260. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
  261. pdata->irq);
  262. }
  263. devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
  264. edac_dbg(3, "success\n");
  265. printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
  266. return 0;
  267. err2:
  268. edac_pci_del_device(&op->dev);
  269. err:
  270. edac_pci_free_ctl_info(pci);
  271. devres_release_group(&op->dev, mpc85xx_pci_err_probe);
  272. return res;
  273. }
  274. EXPORT_SYMBOL(mpc85xx_pci_err_probe);
  275. static int mpc85xx_pci_err_remove(struct platform_device *op)
  276. {
  277. struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
  278. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  279. edac_dbg(0, "\n");
  280. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
  281. orig_pci_err_cap_dr);
  282. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
  283. edac_pci_del_device(pci->dev);
  284. if (edac_op_state == EDAC_OPSTATE_INT)
  285. irq_dispose_mapping(pdata->irq);
  286. edac_pci_free_ctl_info(pci);
  287. return 0;
  288. }
  289. #endif /* CONFIG_PCI */
  290. /**************************** L2 Err device ***************************/
  291. /************************ L2 SYSFS parts ***********************************/
  292. static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
  293. *edac_dev, char *data)
  294. {
  295. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  296. return sprintf(data, "0x%08x",
  297. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
  298. }
  299. static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
  300. *edac_dev, char *data)
  301. {
  302. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  303. return sprintf(data, "0x%08x",
  304. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
  305. }
  306. static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
  307. *edac_dev, char *data)
  308. {
  309. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  310. return sprintf(data, "0x%08x",
  311. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
  312. }
  313. static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
  314. *edac_dev, const char *data,
  315. size_t count)
  316. {
  317. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  318. if (isdigit(*data)) {
  319. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
  320. simple_strtoul(data, NULL, 0));
  321. return count;
  322. }
  323. return 0;
  324. }
  325. static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
  326. *edac_dev, const char *data,
  327. size_t count)
  328. {
  329. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  330. if (isdigit(*data)) {
  331. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
  332. simple_strtoul(data, NULL, 0));
  333. return count;
  334. }
  335. return 0;
  336. }
  337. static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
  338. *edac_dev, const char *data,
  339. size_t count)
  340. {
  341. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  342. if (isdigit(*data)) {
  343. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
  344. simple_strtoul(data, NULL, 0));
  345. return count;
  346. }
  347. return 0;
  348. }
  349. static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
  350. {
  351. .attr = {
  352. .name = "inject_data_hi",
  353. .mode = (S_IRUGO | S_IWUSR)
  354. },
  355. .show = mpc85xx_l2_inject_data_hi_show,
  356. .store = mpc85xx_l2_inject_data_hi_store},
  357. {
  358. .attr = {
  359. .name = "inject_data_lo",
  360. .mode = (S_IRUGO | S_IWUSR)
  361. },
  362. .show = mpc85xx_l2_inject_data_lo_show,
  363. .store = mpc85xx_l2_inject_data_lo_store},
  364. {
  365. .attr = {
  366. .name = "inject_ctrl",
  367. .mode = (S_IRUGO | S_IWUSR)
  368. },
  369. .show = mpc85xx_l2_inject_ctrl_show,
  370. .store = mpc85xx_l2_inject_ctrl_store},
  371. /* End of list */
  372. {
  373. .attr = {.name = NULL}
  374. }
  375. };
  376. static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
  377. *edac_dev)
  378. {
  379. edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
  380. }
  381. /***************************** L2 ops ***********************************/
  382. static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
  383. {
  384. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  385. u32 err_detect;
  386. err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
  387. if (!(err_detect & L2_EDE_MASK))
  388. return;
  389. printk(KERN_ERR "ECC Error in CPU L2 cache\n");
  390. printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
  391. printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
  392. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
  393. printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
  394. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
  395. printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
  396. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
  397. printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
  398. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
  399. printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
  400. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
  401. /* clear error detect register */
  402. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
  403. if (err_detect & L2_EDE_CE_MASK)
  404. edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
  405. if (err_detect & L2_EDE_UE_MASK)
  406. edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
  407. }
  408. static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
  409. {
  410. struct edac_device_ctl_info *edac_dev = dev_id;
  411. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  412. u32 err_detect;
  413. err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
  414. if (!(err_detect & L2_EDE_MASK))
  415. return IRQ_NONE;
  416. mpc85xx_l2_check(edac_dev);
  417. return IRQ_HANDLED;
  418. }
  419. static int mpc85xx_l2_err_probe(struct platform_device *op)
  420. {
  421. struct edac_device_ctl_info *edac_dev;
  422. struct mpc85xx_l2_pdata *pdata;
  423. struct resource r;
  424. int res;
  425. if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
  426. return -ENOMEM;
  427. edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
  428. "cpu", 1, "L", 1, 2, NULL, 0,
  429. edac_dev_idx);
  430. if (!edac_dev) {
  431. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  432. return -ENOMEM;
  433. }
  434. pdata = edac_dev->pvt_info;
  435. pdata->name = "mpc85xx_l2_err";
  436. pdata->irq = NO_IRQ;
  437. edac_dev->dev = &op->dev;
  438. dev_set_drvdata(edac_dev->dev, edac_dev);
  439. edac_dev->ctl_name = pdata->name;
  440. edac_dev->dev_name = pdata->name;
  441. res = of_address_to_resource(op->dev.of_node, 0, &r);
  442. if (res) {
  443. printk(KERN_ERR "%s: Unable to get resource for "
  444. "L2 err regs\n", __func__);
  445. goto err;
  446. }
  447. /* we only need the error registers */
  448. r.start += 0xe00;
  449. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  450. pdata->name)) {
  451. printk(KERN_ERR "%s: Error while requesting mem region\n",
  452. __func__);
  453. res = -EBUSY;
  454. goto err;
  455. }
  456. pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  457. if (!pdata->l2_vbase) {
  458. printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
  459. res = -ENOMEM;
  460. goto err;
  461. }
  462. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
  463. orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
  464. /* clear the err_dis */
  465. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
  466. edac_dev->mod_name = EDAC_MOD_STR;
  467. if (edac_op_state == EDAC_OPSTATE_POLL)
  468. edac_dev->edac_check = mpc85xx_l2_check;
  469. mpc85xx_set_l2_sysfs_attributes(edac_dev);
  470. pdata->edac_idx = edac_dev_idx++;
  471. if (edac_device_add_device(edac_dev) > 0) {
  472. edac_dbg(3, "failed edac_device_add_device()\n");
  473. goto err;
  474. }
  475. if (edac_op_state == EDAC_OPSTATE_INT) {
  476. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  477. res = devm_request_irq(&op->dev, pdata->irq,
  478. mpc85xx_l2_isr, IRQF_DISABLED,
  479. "[EDAC] L2 err", edac_dev);
  480. if (res < 0) {
  481. printk(KERN_ERR
  482. "%s: Unable to request irq %d for "
  483. "MPC85xx L2 err\n", __func__, pdata->irq);
  484. irq_dispose_mapping(pdata->irq);
  485. res = -ENODEV;
  486. goto err2;
  487. }
  488. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
  489. pdata->irq);
  490. edac_dev->op_state = OP_RUNNING_INTERRUPT;
  491. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
  492. }
  493. devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
  494. edac_dbg(3, "success\n");
  495. printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
  496. return 0;
  497. err2:
  498. edac_device_del_device(&op->dev);
  499. err:
  500. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  501. edac_device_free_ctl_info(edac_dev);
  502. return res;
  503. }
  504. static int mpc85xx_l2_err_remove(struct platform_device *op)
  505. {
  506. struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
  507. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  508. edac_dbg(0, "\n");
  509. if (edac_op_state == EDAC_OPSTATE_INT) {
  510. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
  511. irq_dispose_mapping(pdata->irq);
  512. }
  513. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
  514. edac_device_del_device(&op->dev);
  515. edac_device_free_ctl_info(edac_dev);
  516. return 0;
  517. }
  518. static struct of_device_id mpc85xx_l2_err_of_match[] = {
  519. /* deprecate the fsl,85.. forms in the future, 2.6.30? */
  520. { .compatible = "fsl,8540-l2-cache-controller", },
  521. { .compatible = "fsl,8541-l2-cache-controller", },
  522. { .compatible = "fsl,8544-l2-cache-controller", },
  523. { .compatible = "fsl,8548-l2-cache-controller", },
  524. { .compatible = "fsl,8555-l2-cache-controller", },
  525. { .compatible = "fsl,8568-l2-cache-controller", },
  526. { .compatible = "fsl,mpc8536-l2-cache-controller", },
  527. { .compatible = "fsl,mpc8540-l2-cache-controller", },
  528. { .compatible = "fsl,mpc8541-l2-cache-controller", },
  529. { .compatible = "fsl,mpc8544-l2-cache-controller", },
  530. { .compatible = "fsl,mpc8548-l2-cache-controller", },
  531. { .compatible = "fsl,mpc8555-l2-cache-controller", },
  532. { .compatible = "fsl,mpc8560-l2-cache-controller", },
  533. { .compatible = "fsl,mpc8568-l2-cache-controller", },
  534. { .compatible = "fsl,mpc8569-l2-cache-controller", },
  535. { .compatible = "fsl,mpc8572-l2-cache-controller", },
  536. { .compatible = "fsl,p1020-l2-cache-controller", },
  537. { .compatible = "fsl,p1021-l2-cache-controller", },
  538. { .compatible = "fsl,p2020-l2-cache-controller", },
  539. {},
  540. };
  541. MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
  542. static struct platform_driver mpc85xx_l2_err_driver = {
  543. .probe = mpc85xx_l2_err_probe,
  544. .remove = mpc85xx_l2_err_remove,
  545. .driver = {
  546. .name = "mpc85xx_l2_err",
  547. .owner = THIS_MODULE,
  548. .of_match_table = mpc85xx_l2_err_of_match,
  549. },
  550. };
  551. /**************************** MC Err device ***************************/
  552. /*
  553. * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
  554. * MPC8572 User's Manual. Each line represents a syndrome bit column as a
  555. * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
  556. * below correspond to Freescale's manuals.
  557. */
  558. static unsigned int ecc_table[16] = {
  559. /* MSB LSB */
  560. /* [0:31] [32:63] */
  561. 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
  562. 0x00ff00ff, 0x00fff0ff,
  563. 0x0f0f0f0f, 0x0f0fff00,
  564. 0x11113333, 0x7777000f,
  565. 0x22224444, 0x8888222f,
  566. 0x44448888, 0xffff4441,
  567. 0x8888ffff, 0x11118882,
  568. 0xffff1111, 0x22221114, /* Syndrome bit 0 */
  569. };
  570. /*
  571. * Calculate the correct ECC value for a 64-bit value specified by high:low
  572. */
  573. static u8 calculate_ecc(u32 high, u32 low)
  574. {
  575. u32 mask_low;
  576. u32 mask_high;
  577. int bit_cnt;
  578. u8 ecc = 0;
  579. int i;
  580. int j;
  581. for (i = 0; i < 8; i++) {
  582. mask_high = ecc_table[i * 2];
  583. mask_low = ecc_table[i * 2 + 1];
  584. bit_cnt = 0;
  585. for (j = 0; j < 32; j++) {
  586. if ((mask_high >> j) & 1)
  587. bit_cnt ^= (high >> j) & 1;
  588. if ((mask_low >> j) & 1)
  589. bit_cnt ^= (low >> j) & 1;
  590. }
  591. ecc |= bit_cnt << i;
  592. }
  593. return ecc;
  594. }
  595. /*
  596. * Create the syndrome code which is generated if the data line specified by
  597. * 'bit' failed. Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
  598. * User's Manual and 9-61 in the MPC8572 User's Manual.
  599. */
  600. static u8 syndrome_from_bit(unsigned int bit) {
  601. int i;
  602. u8 syndrome = 0;
  603. /*
  604. * Cycle through the upper or lower 32-bit portion of each value in
  605. * ecc_table depending on if 'bit' is in the upper or lower half of
  606. * 64-bit data.
  607. */
  608. for (i = bit < 32; i < 16; i += 2)
  609. syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
  610. return syndrome;
  611. }
  612. /*
  613. * Decode data and ecc syndrome to determine what went wrong
  614. * Note: This can only decode single-bit errors
  615. */
  616. static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
  617. int *bad_data_bit, int *bad_ecc_bit)
  618. {
  619. int i;
  620. u8 syndrome;
  621. *bad_data_bit = -1;
  622. *bad_ecc_bit = -1;
  623. /*
  624. * Calculate the ECC of the captured data and XOR it with the captured
  625. * ECC to find an ECC syndrome value we can search for
  626. */
  627. syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
  628. /* Check if a data line is stuck... */
  629. for (i = 0; i < 64; i++) {
  630. if (syndrome == syndrome_from_bit(i)) {
  631. *bad_data_bit = i;
  632. return;
  633. }
  634. }
  635. /* If data is correct, check ECC bits for errors... */
  636. for (i = 0; i < 8; i++) {
  637. if ((syndrome >> i) & 0x1) {
  638. *bad_ecc_bit = i;
  639. return;
  640. }
  641. }
  642. }
  643. static void mpc85xx_mc_check(struct mem_ctl_info *mci)
  644. {
  645. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  646. struct csrow_info *csrow;
  647. u32 bus_width;
  648. u32 err_detect;
  649. u32 syndrome;
  650. u32 err_addr;
  651. u32 pfn;
  652. int row_index;
  653. u32 cap_high;
  654. u32 cap_low;
  655. int bad_data_bit;
  656. int bad_ecc_bit;
  657. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  658. if (!err_detect)
  659. return;
  660. mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
  661. err_detect);
  662. /* no more processing if not ECC bit errors */
  663. if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
  664. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  665. return;
  666. }
  667. syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
  668. /* Mask off appropriate bits of syndrome based on bus width */
  669. bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
  670. DSC_DBW_MASK) ? 32 : 64;
  671. if (bus_width == 64)
  672. syndrome &= 0xff;
  673. else
  674. syndrome &= 0xffff;
  675. err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
  676. pfn = err_addr >> PAGE_SHIFT;
  677. for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
  678. csrow = mci->csrows[row_index];
  679. if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
  680. break;
  681. }
  682. cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
  683. cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
  684. /*
  685. * Analyze single-bit errors on 64-bit wide buses
  686. * TODO: Add support for 32-bit wide buses
  687. */
  688. if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
  689. sbe_ecc_decode(cap_high, cap_low, syndrome,
  690. &bad_data_bit, &bad_ecc_bit);
  691. if (bad_data_bit != -1)
  692. mpc85xx_mc_printk(mci, KERN_ERR,
  693. "Faulty Data bit: %d\n", bad_data_bit);
  694. if (bad_ecc_bit != -1)
  695. mpc85xx_mc_printk(mci, KERN_ERR,
  696. "Faulty ECC bit: %d\n", bad_ecc_bit);
  697. mpc85xx_mc_printk(mci, KERN_ERR,
  698. "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  699. cap_high ^ (1 << (bad_data_bit - 32)),
  700. cap_low ^ (1 << bad_data_bit),
  701. syndrome ^ (1 << bad_ecc_bit));
  702. }
  703. mpc85xx_mc_printk(mci, KERN_ERR,
  704. "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  705. cap_high, cap_low, syndrome);
  706. mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
  707. mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
  708. /* we are out of range */
  709. if (row_index == mci->nr_csrows)
  710. mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
  711. if (err_detect & DDR_EDE_SBE)
  712. edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
  713. pfn, err_addr & ~PAGE_MASK, syndrome,
  714. row_index, 0, -1,
  715. mci->ctl_name, "");
  716. if (err_detect & DDR_EDE_MBE)
  717. edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
  718. pfn, err_addr & ~PAGE_MASK, syndrome,
  719. row_index, 0, -1,
  720. mci->ctl_name, "");
  721. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  722. }
  723. static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
  724. {
  725. struct mem_ctl_info *mci = dev_id;
  726. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  727. u32 err_detect;
  728. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  729. if (!err_detect)
  730. return IRQ_NONE;
  731. mpc85xx_mc_check(mci);
  732. return IRQ_HANDLED;
  733. }
  734. static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
  735. {
  736. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  737. struct csrow_info *csrow;
  738. struct dimm_info *dimm;
  739. u32 sdram_ctl;
  740. u32 sdtype;
  741. enum mem_type mtype;
  742. u32 cs_bnds;
  743. int index;
  744. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  745. sdtype = sdram_ctl & DSC_SDTYPE_MASK;
  746. if (sdram_ctl & DSC_RD_EN) {
  747. switch (sdtype) {
  748. case DSC_SDTYPE_DDR:
  749. mtype = MEM_RDDR;
  750. break;
  751. case DSC_SDTYPE_DDR2:
  752. mtype = MEM_RDDR2;
  753. break;
  754. case DSC_SDTYPE_DDR3:
  755. mtype = MEM_RDDR3;
  756. break;
  757. default:
  758. mtype = MEM_UNKNOWN;
  759. break;
  760. }
  761. } else {
  762. switch (sdtype) {
  763. case DSC_SDTYPE_DDR:
  764. mtype = MEM_DDR;
  765. break;
  766. case DSC_SDTYPE_DDR2:
  767. mtype = MEM_DDR2;
  768. break;
  769. case DSC_SDTYPE_DDR3:
  770. mtype = MEM_DDR3;
  771. break;
  772. default:
  773. mtype = MEM_UNKNOWN;
  774. break;
  775. }
  776. }
  777. for (index = 0; index < mci->nr_csrows; index++) {
  778. u32 start;
  779. u32 end;
  780. csrow = mci->csrows[index];
  781. dimm = csrow->channels[0]->dimm;
  782. cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
  783. (index * MPC85XX_MC_CS_BNDS_OFS));
  784. start = (cs_bnds & 0xffff0000) >> 16;
  785. end = (cs_bnds & 0x0000ffff);
  786. if (start == end)
  787. continue; /* not populated */
  788. start <<= (24 - PAGE_SHIFT);
  789. end <<= (24 - PAGE_SHIFT);
  790. end |= (1 << (24 - PAGE_SHIFT)) - 1;
  791. csrow->first_page = start;
  792. csrow->last_page = end;
  793. dimm->nr_pages = end + 1 - start;
  794. dimm->grain = 8;
  795. dimm->mtype = mtype;
  796. dimm->dtype = DEV_UNKNOWN;
  797. if (sdram_ctl & DSC_X32_EN)
  798. dimm->dtype = DEV_X32;
  799. dimm->edac_mode = EDAC_SECDED;
  800. }
  801. }
  802. static int mpc85xx_mc_err_probe(struct platform_device *op)
  803. {
  804. struct mem_ctl_info *mci;
  805. struct edac_mc_layer layers[2];
  806. struct mpc85xx_mc_pdata *pdata;
  807. struct resource r;
  808. u32 sdram_ctl;
  809. int res;
  810. if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
  811. return -ENOMEM;
  812. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  813. layers[0].size = 4;
  814. layers[0].is_virt_csrow = true;
  815. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  816. layers[1].size = 1;
  817. layers[1].is_virt_csrow = false;
  818. mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
  819. sizeof(*pdata));
  820. if (!mci) {
  821. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  822. return -ENOMEM;
  823. }
  824. pdata = mci->pvt_info;
  825. pdata->name = "mpc85xx_mc_err";
  826. pdata->irq = NO_IRQ;
  827. mci->pdev = &op->dev;
  828. pdata->edac_idx = edac_mc_idx++;
  829. dev_set_drvdata(mci->pdev, mci);
  830. mci->ctl_name = pdata->name;
  831. mci->dev_name = pdata->name;
  832. res = of_address_to_resource(op->dev.of_node, 0, &r);
  833. if (res) {
  834. printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
  835. __func__);
  836. goto err;
  837. }
  838. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  839. pdata->name)) {
  840. printk(KERN_ERR "%s: Error while requesting mem region\n",
  841. __func__);
  842. res = -EBUSY;
  843. goto err;
  844. }
  845. pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  846. if (!pdata->mc_vbase) {
  847. printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
  848. res = -ENOMEM;
  849. goto err;
  850. }
  851. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  852. if (!(sdram_ctl & DSC_ECC_EN)) {
  853. /* no ECC */
  854. printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
  855. res = -ENODEV;
  856. goto err;
  857. }
  858. edac_dbg(3, "init mci\n");
  859. mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
  860. MEM_FLAG_DDR | MEM_FLAG_DDR2;
  861. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  862. mci->edac_cap = EDAC_FLAG_SECDED;
  863. mci->mod_name = EDAC_MOD_STR;
  864. mci->mod_ver = MPC85XX_REVISION;
  865. if (edac_op_state == EDAC_OPSTATE_POLL)
  866. mci->edac_check = mpc85xx_mc_check;
  867. mci->ctl_page_to_phys = NULL;
  868. mci->scrub_mode = SCRUB_SW_SRC;
  869. mpc85xx_init_csrows(mci);
  870. /* store the original error disable bits */
  871. orig_ddr_err_disable =
  872. in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
  873. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
  874. /* clear all error bits */
  875. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
  876. if (edac_mc_add_mc(mci)) {
  877. edac_dbg(3, "failed edac_mc_add_mc()\n");
  878. goto err;
  879. }
  880. if (mpc85xx_create_sysfs_attributes(mci)) {
  881. edac_mc_del_mc(mci->pdev);
  882. edac_dbg(3, "failed edac_mc_add_mc()\n");
  883. goto err;
  884. }
  885. if (edac_op_state == EDAC_OPSTATE_INT) {
  886. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
  887. DDR_EIE_MBEE | DDR_EIE_SBEE);
  888. /* store the original error management threshold */
  889. orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
  890. MPC85XX_MC_ERR_SBE) & 0xff0000;
  891. /* set threshold to 1 error per interrupt */
  892. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
  893. /* register interrupts */
  894. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  895. res = devm_request_irq(&op->dev, pdata->irq,
  896. mpc85xx_mc_isr,
  897. IRQF_DISABLED | IRQF_SHARED,
  898. "[EDAC] MC err", mci);
  899. if (res < 0) {
  900. printk(KERN_ERR "%s: Unable to request irq %d for "
  901. "MPC85xx DRAM ERR\n", __func__, pdata->irq);
  902. irq_dispose_mapping(pdata->irq);
  903. res = -ENODEV;
  904. goto err2;
  905. }
  906. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
  907. pdata->irq);
  908. }
  909. devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
  910. edac_dbg(3, "success\n");
  911. printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
  912. return 0;
  913. err2:
  914. edac_mc_del_mc(&op->dev);
  915. err:
  916. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  917. edac_mc_free(mci);
  918. return res;
  919. }
/*
 * Teardown counterpart of mpc85xx_mc_err_probe(): quiesce the hardware,
 * restore the saved error-control registers and unregister from the
 * EDAC core.  Ordering matters: interrupts are masked and the IRQ
 * mapping disposed before the controller state is restored.
 */
static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* Mask MC error interrupts before dropping the mapping. */
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	/* Restore the error-disable bits saved at probe time. */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		 orig_ddr_err_disable);
	/*
	 * NOTE(review): orig_ddr_err_sbe is only captured when probe ran in
	 * interrupt mode, yet it is restored unconditionally here — in poll
	 * mode this writes 0 to ERR_SBE.  Confirm whether that is intended.
	 */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	mpc85xx_remove_sysfs_attributes(mci);
	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}
  937. static struct of_device_id mpc85xx_mc_err_of_match[] = {
  938. /* deprecate the fsl,85.. forms in the future, 2.6.30? */
  939. { .compatible = "fsl,8540-memory-controller", },
  940. { .compatible = "fsl,8541-memory-controller", },
  941. { .compatible = "fsl,8544-memory-controller", },
  942. { .compatible = "fsl,8548-memory-controller", },
  943. { .compatible = "fsl,8555-memory-controller", },
  944. { .compatible = "fsl,8568-memory-controller", },
  945. { .compatible = "fsl,mpc8536-memory-controller", },
  946. { .compatible = "fsl,mpc8540-memory-controller", },
  947. { .compatible = "fsl,mpc8541-memory-controller", },
  948. { .compatible = "fsl,mpc8544-memory-controller", },
  949. { .compatible = "fsl,mpc8548-memory-controller", },
  950. { .compatible = "fsl,mpc8555-memory-controller", },
  951. { .compatible = "fsl,mpc8560-memory-controller", },
  952. { .compatible = "fsl,mpc8568-memory-controller", },
  953. { .compatible = "fsl,mpc8569-memory-controller", },
  954. { .compatible = "fsl,mpc8572-memory-controller", },
  955. { .compatible = "fsl,mpc8349-memory-controller", },
  956. { .compatible = "fsl,p1020-memory-controller", },
  957. { .compatible = "fsl,p1021-memory-controller", },
  958. { .compatible = "fsl,p2020-memory-controller", },
  959. { .compatible = "fsl,qoriq-memory-controller", },
  960. {},
  961. };
  962. MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
/* Platform driver glue binding the OF match table to probe/remove. */
static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		.name = "mpc85xx_mc_err",
		.owner = THIS_MODULE,
		.of_match_table = mpc85xx_mc_err_of_match,
	},
};
#ifdef CONFIG_FSL_SOC_BOOKE
/*
 * Per-CPU callback (run via on_each_cpu at init): save the current HID1
 * value for this CPU and clear HID1[RFXE], so the machine-check interrupt
 * for DDR errors can be caught by this driver instead of being fatal.
 * The saved value is restored by mpc85xx_mc_restore_hid1() at exit.
 */
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif
  979. static int __init mpc85xx_mc_init(void)
  980. {
  981. int res = 0;
  982. u32 pvr = 0;
  983. printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
  984. "(C) 2006 Montavista Software\n");
  985. /* make sure error reporting method is sane */
  986. switch (edac_op_state) {
  987. case EDAC_OPSTATE_POLL:
  988. case EDAC_OPSTATE_INT:
  989. break;
  990. default:
  991. edac_op_state = EDAC_OPSTATE_INT;
  992. break;
  993. }
  994. res = platform_driver_register(&mpc85xx_mc_err_driver);
  995. if (res)
  996. printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
  997. res = platform_driver_register(&mpc85xx_l2_err_driver);
  998. if (res)
  999. printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
  1000. #ifdef CONFIG_FSL_SOC_BOOKE
  1001. pvr = mfspr(SPRN_PVR);
  1002. if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
  1003. (PVR_VER(pvr) == PVR_VER_E500V2)) {
  1004. /*
  1005. * need to clear HID1[RFXE] to disable machine check int
  1006. * so we can catch it
  1007. */
  1008. if (edac_op_state == EDAC_OPSTATE_INT)
  1009. on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
  1010. }
  1011. #endif
  1012. return 0;
  1013. }
  1014. module_init(mpc85xx_mc_init);
#ifdef CONFIG_FSL_SOC_BOOKE
/*
 * Per-CPU callback (run via on_each_cpu at exit): restore the HID1
 * value that mpc85xx_mc_clear_rfxe() saved for this CPU at init.
 */
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
#endif
/*
 * Module exit: undo the HID1[RFXE] change on e500v1/v2 and unregister
 * the platform drivers in reverse order of registration.
 */
static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
	u32 pvr = mfspr(SPRN_PVR);

	/*
	 * NOTE(review): init only clears RFXE when edac_op_state is
	 * EDAC_OPSTATE_INT, but this restore runs unconditionally on
	 * e500v1/v2 — in poll mode orig_hid1[] was never populated.
	 * Confirm this is benign on the targeted parts.
	 */
	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
	}
#endif
	platform_driver_unregister(&mpc85xx_l2_err_driver);
	platform_driver_unregister(&mpc85xx_mc_err_driver);
}
module_exit(mpc85xx_mc_exit);
/* Module metadata; edac_op_state is exposed read-only (0444) in sysfs. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");