mpc85xx_edac.c

/*
 * Freescale MPC85xx Memory Controller kernel module
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"

static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/*
 * PCI Err defines
 */
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif

static u32 orig_l2_err_disable;
#ifdef CONFIG_FSL_SOC_BOOKE
static u32 orig_hid1[2];
#endif

/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
					struct device_attribute *mattr,
					char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI));
}

static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
					struct device_attribute *mattr,
					char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO));
}

static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
					struct device_attribute *mattr,
					char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
}

static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);

static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
{
	int rc;

	rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
	if (rc < 0)
		return rc;
	rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
	if (rc < 0)
		return rc;
	rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
	device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
	device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
}

/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI

static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	/* master aborts can happen during PCI config cycles */
	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
		return;
	}

	printk(KERN_ERR "PCI error(s) detected\n");
	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);

	if (err_detect & PCI_EDE_PERR_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
		edac_pci_handle_npe(pci, pci->ctl_name);
}

static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_pci_check(pci);

	return IRQ_HANDLED;
}

static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci;
	struct mpc85xx_pci_pdata *pdata;
	struct resource r;
	int res = 0;

	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;
	pdata->name = "mpc85xx_pci_err";
	pdata->irq = NO_IRQ;
	dev_set_drvdata(&op->dev, pci);
	pci->dev = &op->dev;
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;
	pci->dev_name = dev_name(&op->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mpc85xx_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
			"PCI err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
					pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
			__func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	orig_pci_err_cap_dr =
		in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);

	/* PCI master abort is expected during config cycles */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);

	orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);

	/* disable master abort reporting */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
					mpc85xx_pci_isr, IRQF_DISABLED,
					"[EDAC] PCI err", pci);
		if (res < 0) {
			printk(KERN_ERR
				"%s: Unable to request irq %d for "
				"MPC85xx PCI err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
			pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");

	return 0;

err2:
	edac_pci_del_device(&op->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
	return res;
}

static int mpc85xx_pci_err_remove(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;

	debugf0("%s()\n", __func__);

	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
		orig_pci_err_cap_dr);
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);

	edac_pci_del_device(pci->dev);

	if (edac_op_state == EDAC_OPSTATE_INT)
		irq_dispose_mapping(pdata->irq);

	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct of_device_id mpc85xx_pci_err_of_match[] = {
	{
		.compatible = "fsl,mpc8540-pcix",
	},
	{
		.compatible = "fsl,mpc8540-pci",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);

static struct platform_driver mpc85xx_pci_err_driver = {
	.probe = mpc85xx_pci_err_probe,
	.remove = __devexit_p(mpc85xx_pci_err_remove),
	.driver = {
		.name = "mpc85xx_pci_err",
		.owner = THIS_MODULE,
		.of_match_table = mpc85xx_pci_err_of_match,
	},
};

#endif	/* CONFIG_PCI */

/**************************** L2 Err device ***************************/

/************************ L2 SYSFS parts ***********************************/

static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
					*edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
}

static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
					*edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
}

static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
					*edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
}

static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
					*edac_dev, const char *data,
					size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
					*edac_dev, const char *data,
					size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
					*edac_dev, const char *data,
					size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
			simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
	{
		.attr = {
			.name = "inject_data_hi",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = mpc85xx_l2_inject_data_hi_show,
		.store = mpc85xx_l2_inject_data_hi_store
	},
	{
		.attr = {
			.name = "inject_data_lo",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = mpc85xx_l2_inject_data_lo_show,
		.store = mpc85xx_l2_inject_data_lo_store
	},
	{
		.attr = {
			.name = "inject_ctrl",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = mpc85xx_l2_inject_ctrl_show,
		.store = mpc85xx_l2_inject_ctrl_store
	},
	/* End of list */
	{
		.attr = {.name = NULL}
	}
};

static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
					*edac_dev)
{
	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}

/***************************** L2 ops ***********************************/

static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return;

	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
		in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
		in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
		in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
		in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
		in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));

	/* clear error detect register */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);

	if (err_detect & L2_EDE_CE_MASK)
		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);

	if (err_detect & L2_EDE_UE_MASK)
		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return IRQ_NONE;

	mpc85xx_l2_check(edac_dev);

	return IRQ_HANDLED;
}

static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev;
	struct mpc85xx_l2_pdata *pdata;
	struct resource r;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					"cpu", 1, "L", 1, 2, NULL, 0,
					edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&op->dev, mpc85xx_l2_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mpc85xx_l2_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &op->dev;
	dev_set_drvdata(edac_dev->dev, edac_dev);
	edac_dev->ctl_name = pdata->name;
	edac_dev->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
			"L2 err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
					pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
			__func__);
		res = -EBUSY;
		goto err;
	}

	pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->l2_vbase) {
		printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);

	orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);

	/* clear the err_dis */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);

	edac_dev->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mpc85xx_l2_check;

	mpc85xx_set_l2_sysfs_attributes(edac_dev);

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		debugf3("%s(): failed edac_device_add_device()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
					mpc85xx_l2_isr, IRQF_DISABLED,
					"[EDAC] L2 err", edac_dev);
		if (res < 0) {
			printk(KERN_ERR
				"%s: Unable to request irq %d for "
				"MPC85xx L2 err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
			pdata->irq);

		edac_dev->op_state = OP_RUNNING_INTERRUPT;

		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
	}

	devres_remove_group(&op->dev, mpc85xx_l2_err_probe);

	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");

	return 0;

err2:
	edac_device_del_device(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_l2_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mpc85xx_l2_err_remove(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;

	debugf0("%s()\n", __func__);

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
	edac_device_del_device(&op->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct of_device_id mpc85xx_l2_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-l2-cache-controller", },
	{ .compatible = "fsl,8541-l2-cache-controller", },
	{ .compatible = "fsl,8544-l2-cache-controller", },
	{ .compatible = "fsl,8548-l2-cache-controller", },
	{ .compatible = "fsl,8555-l2-cache-controller", },
	{ .compatible = "fsl,8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8569-l2-cache-controller", },
	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
	{ .compatible = "fsl,p1020-l2-cache-controller", },
	{ .compatible = "fsl,p1021-l2-cache-controller", },
	{ .compatible = "fsl,p2020-l2-cache-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);

static struct platform_driver mpc85xx_l2_err_driver = {
	.probe = mpc85xx_l2_err_probe,
	.remove = mpc85xx_l2_err_remove,
	.driver = {
		.name = "mpc85xx_l2_err",
		.owner = THIS_MODULE,
		.of_match_table = mpc85xx_l2_err_of_match,
	},
};

/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB          LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}

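/*
 * Each ECC check bit computed above is the even parity (XOR) of the data
 * bits selected by one mask pair in ecc_table.  Flipping a single data bit
 * therefore flips exactly the check bits whose masks cover it, which is the
 * property sbe_ecc_decode() below relies on when it compares a captured
 * syndrome against syndrome_from_bit().
 */
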
/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed.  E.g. generate the 8-bit codes seen in Table 8-55 in the
 * MPC8641 User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}

/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
			int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}

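/*
 * Note: a syndrome that matches none of the 64 data-line patterns and has
 * no single check bit set (e.g. from a multi-bit error) leaves both
 * *bad_data_bit and *bad_ecc_bit at -1, so the caller reports no faulty bit.
 */
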
static void mpc85xx_mc_check(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u32 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return;

	mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
			err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
			DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
	pfn = err_addr >> PAGE_SHIFT;

	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = &mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
	cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		sbe_ecc_decode(cap_high, cap_low, syndrome,
				&bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
				"Faulty Data bit: %d\n", bad_data_bit);
		if (bad_ecc_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
				"Faulty ECC bit: %d\n", bad_ecc_bit);

		mpc85xx_mc_printk(mci, KERN_ERR,
			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high ^ (1 << (bad_data_bit - 32)),
			cap_low ^ (1 << bad_data_bit),
			syndrome ^ (1 << bad_ecc_bit));
	}

	mpc85xx_mc_printk(mci, KERN_ERR,
		"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
		cap_high, cap_low, syndrome);
	mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
	mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				pfn, err_addr & ~PAGE_MASK, syndrome,
				row_index, 0, -1,
				mci->ctl_name, "", NULL);

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				pfn, err_addr & ~PAGE_MASK, syndrome,
				row_index, 0, -1,
				mci->ctl_name, "", NULL);

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}

static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_mc_check(mci);

	return IRQ_HANDLED;
}

static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_RDDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_RDDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_RDDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_DDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_DDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_DDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = &mci->csrows[index];
		dimm = csrow->channels[0].dimm;
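
		/*
		 * CS_BNDS packs the chip-select start and end addresses as
		 * 16-bit fields in 16 MB units; they are converted below to
		 * page-frame numbers so that the error PFN reported in
		 * mpc85xx_mc_check() can be matched against a csrow.
		 */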
		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
				(index * MPC85XX_MC_CS_BNDS_OFS));

		start = (cs_bnds & 0xffff0000) >> 16;
		end = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end <<= (24 - PAGE_SHIFT);
		end |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}

static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mpc85xx_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "mpc85xx_mc_err";
	pdata->irq = NO_IRQ;
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
			__func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
					pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
			__func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
			MEM_FLAG_DDR | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MPC85XX_REVISION;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mpc85xx_mc_check;

	mci->ctl_page_to_phys = NULL;
	mci->scrub_mode = SCRUB_SW_SRC;

	mpc85xx_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable =
		in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);

	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto err;
	}

	if (mpc85xx_create_sysfs_attributes(mci)) {
		edac_mc_del_mc(mci->pdev);
		debugf3("%s(): failed mpc85xx_create_sysfs_attributes()\n",
			__func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
			DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
					MPC85XX_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
					mpc85xx_mc_isr,
					IRQF_DISABLED | IRQF_SHARED,
					"[EDAC] MC err", mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
				"MPC85xx DRAM ERR\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
			pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	debugf0("%s()\n", __func__);

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		orig_ddr_err_disable);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	mpc85xx_remove_sysfs_attributes(mci);
	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}

static struct of_device_id mpc85xx_mc_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-memory-controller", },
	{ .compatible = "fsl,8541-memory-controller", },
	{ .compatible = "fsl,8544-memory-controller", },
	{ .compatible = "fsl,8548-memory-controller", },
	{ .compatible = "fsl,8555-memory-controller", },
	{ .compatible = "fsl,8568-memory-controller", },
	{ .compatible = "fsl,mpc8536-memory-controller", },
	{ .compatible = "fsl,mpc8540-memory-controller", },
	{ .compatible = "fsl,mpc8541-memory-controller", },
	{ .compatible = "fsl,mpc8544-memory-controller", },
	{ .compatible = "fsl,mpc8548-memory-controller", },
	{ .compatible = "fsl,mpc8555-memory-controller", },
	{ .compatible = "fsl,mpc8560-memory-controller", },
	{ .compatible = "fsl,mpc8568-memory-controller", },
	{ .compatible = "fsl,mpc8569-memory-controller", },
	{ .compatible = "fsl,mpc8572-memory-controller", },
	{ .compatible = "fsl,mpc8349-memory-controller", },
	{ .compatible = "fsl,p1020-memory-controller", },
	{ .compatible = "fsl,p1021-memory-controller", },
	{ .compatible = "fsl,p2020-memory-controller", },
	{ .compatible = "fsl,qoriq-memory-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);

static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		.name = "mpc85xx_mc_err",
		.owner = THIS_MODULE,
		.of_match_table = mpc85xx_mc_err_of_match,
	},
};

#ifdef CONFIG_FSL_SOC_BOOKE
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif

static int __init mpc85xx_mc_init(void)
{
	int res = 0;
	u32 pvr = 0;

	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
		"(C) 2006 Montavista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	res = platform_driver_register(&mpc85xx_mc_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");

	res = platform_driver_register(&mpc85xx_l2_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");

#ifdef CONFIG_PCI
	res = platform_driver_register(&mpc85xx_pci_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif

#ifdef CONFIG_FSL_SOC_BOOKE
	pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		/*
		 * need to clear HID1[RFXE] to disable machine check int
		 * so we can catch it
		 */
		if (edac_op_state == EDAC_OPSTATE_INT)
			on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
	}
#endif

	return 0;
}

module_init(mpc85xx_mc_init);

#ifdef CONFIG_FSL_SOC_BOOKE
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
#endif

static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
	u32 pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
	}
#endif
#ifdef CONFIG_PCI
	platform_driver_unregister(&mpc85xx_pci_err_driver);
#endif
	platform_driver_unregister(&mpc85xx_l2_err_driver);
	platform_driver_unregister(&mpc85xx_mc_err_driver);
}

module_exit(mpc85xx_mc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		"EDAC Error Reporting state: 0=Poll, 2=Interrupt");