mpc85xx_edac.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268
  1. /*
* Freescale MPC85xx Memory Controller kernel module
  3. *
  4. * Author: Dave Jiang <djiang@mvista.com>
  5. *
  6. * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
  7. * the terms of the GNU General Public License version 2. This program
  8. * is licensed "as is" without any warranty of any kind, whether express
  9. * or implied.
  10. *
  11. */
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/ctype.h>
  16. #include <linux/io.h>
  17. #include <linux/mod_devicetable.h>
  18. #include <linux/edac.h>
  19. #include <linux/smp.h>
  20. #include <linux/gfp.h>
  21. #include <linux/of_platform.h>
  22. #include <linux/of_device.h>
  23. #include "edac_module.h"
  24. #include "edac_core.h"
  25. #include "mpc85xx_edac.h"
/* Running instance counters for the EDAC device / PCI / MC controllers. */
static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
static int edac_mc_idx;

/* Register values saved at probe time so remove() can restore them. */
static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/*
 * PCI Err defines
 */
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif

static u32 orig_l2_err_disable;
#ifdef CONFIG_FSL_SOC_BOOKE
/* Saved per-core HID1 values; users are outside this chunk — presumably
 * restored on module exit (TODO confirm). */
static u32 orig_hid1[2];
#endif

/************************ MC SYSFS parts ***********************************/

/* Map the struct device embedded in mem_ctl_info back to the mci itself. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  46. static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
  47. struct device_attribute *mattr,
  48. char *data)
  49. {
  50. struct mem_ctl_info *mci = to_mci(dev);
  51. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  52. return sprintf(data, "0x%08x",
  53. in_be32(pdata->mc_vbase +
  54. MPC85XX_MC_DATA_ERR_INJECT_HI));
  55. }
  56. static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
  57. struct device_attribute *mattr,
  58. char *data)
  59. {
  60. struct mem_ctl_info *mci = to_mci(dev);
  61. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  62. return sprintf(data, "0x%08x",
  63. in_be32(pdata->mc_vbase +
  64. MPC85XX_MC_DATA_ERR_INJECT_LO));
  65. }
  66. static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
  67. struct device_attribute *mattr,
  68. char *data)
  69. {
  70. struct mem_ctl_info *mci = to_mci(dev);
  71. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  72. return sprintf(data, "0x%08x",
  73. in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
  74. }
  75. static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
  76. struct device_attribute *mattr,
  77. const char *data, size_t count)
  78. {
  79. struct mem_ctl_info *mci = to_mci(dev);
  80. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  81. if (isdigit(*data)) {
  82. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
  83. simple_strtoul(data, NULL, 0));
  84. return count;
  85. }
  86. return 0;
  87. }
  88. static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
  89. struct device_attribute *mattr,
  90. const char *data, size_t count)
  91. {
  92. struct mem_ctl_info *mci = to_mci(dev);
  93. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  94. if (isdigit(*data)) {
  95. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
  96. simple_strtoul(data, NULL, 0));
  97. return count;
  98. }
  99. return 0;
  100. }
  101. static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
  102. struct device_attribute *mattr,
  103. const char *data, size_t count)
  104. {
  105. struct mem_ctl_info *mci = to_mci(dev);
  106. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  107. if (isdigit(*data)) {
  108. out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
  109. simple_strtoul(data, NULL, 0));
  110. return count;
  111. }
  112. return 0;
  113. }
/*
 * Error-injection sysfs nodes: reads return the current register
 * contents, writes program the corresponding injection register.
 */
DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
  120. static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
  121. {
  122. int rc;
  123. rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
  124. if (rc < 0)
  125. return rc;
  126. rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
  127. if (rc < 0)
  128. return rc;
  129. rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
  130. if (rc < 0)
  131. return rc;
  132. return 0;
  133. }
  134. static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
  135. {
  136. device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
  137. device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
  138. device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
  139. }
/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI
/*
 * Inspect the PCI error-capture registers and report latched errors.
 *
 * Reads ERR_DR; if only the multi-error / master-abort bits are set
 * (master aborts are expected during config cycles) the latch is quietly
 * acknowledged.  Otherwise all capture registers are dumped to the log,
 * the latch is cleared, and the event is forwarded to the EDAC PCI core
 * as a parity (PE) and/or non-parity (NPE) error.
 */
static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	/* master aborts can happen during PCI config cycles */
	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
		return;
	}

	printk(KERN_ERR "PCI error(s) detected\n");
	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);

	if (err_detect & PCI_EDE_PERR_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	/* anything else (excluding the multi-error summary bit) is an NPE */
	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
		edac_pci_handle_npe(pci, pci->ctl_name);
}
  171. static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
  172. {
  173. struct edac_pci_ctl_info *pci = dev_id;
  174. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  175. u32 err_detect;
  176. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  177. if (!err_detect)
  178. return IRQ_NONE;
  179. mpc85xx_pci_check(pci);
  180. return IRQ_HANDLED;
  181. }
  182. static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
  183. {
  184. struct edac_pci_ctl_info *pci;
  185. struct mpc85xx_pci_pdata *pdata;
  186. struct resource r;
  187. int res = 0;
  188. if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
  189. return -ENOMEM;
  190. pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
  191. if (!pci)
  192. return -ENOMEM;
  193. pdata = pci->pvt_info;
  194. pdata->name = "mpc85xx_pci_err";
  195. pdata->irq = NO_IRQ;
  196. dev_set_drvdata(&op->dev, pci);
  197. pci->dev = &op->dev;
  198. pci->mod_name = EDAC_MOD_STR;
  199. pci->ctl_name = pdata->name;
  200. pci->dev_name = dev_name(&op->dev);
  201. if (edac_op_state == EDAC_OPSTATE_POLL)
  202. pci->edac_check = mpc85xx_pci_check;
  203. pdata->edac_idx = edac_pci_idx++;
  204. res = of_address_to_resource(op->dev.of_node, 0, &r);
  205. if (res) {
  206. printk(KERN_ERR "%s: Unable to get resource for "
  207. "PCI err regs\n", __func__);
  208. goto err;
  209. }
  210. /* we only need the error registers */
  211. r.start += 0xe00;
  212. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  213. pdata->name)) {
  214. printk(KERN_ERR "%s: Error while requesting mem region\n",
  215. __func__);
  216. res = -EBUSY;
  217. goto err;
  218. }
  219. pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  220. if (!pdata->pci_vbase) {
  221. printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
  222. res = -ENOMEM;
  223. goto err;
  224. }
  225. orig_pci_err_cap_dr =
  226. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
  227. /* PCI master abort is expected during config cycles */
  228. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
  229. orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
  230. /* disable master abort reporting */
  231. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
  232. /* clear error bits */
  233. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
  234. if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
  235. edac_dbg(3, "failed edac_pci_add_device()\n");
  236. goto err;
  237. }
  238. if (edac_op_state == EDAC_OPSTATE_INT) {
  239. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  240. res = devm_request_irq(&op->dev, pdata->irq,
  241. mpc85xx_pci_isr, IRQF_DISABLED,
  242. "[EDAC] PCI err", pci);
  243. if (res < 0) {
  244. printk(KERN_ERR
  245. "%s: Unable to requiest irq %d for "
  246. "MPC85xx PCI err\n", __func__, pdata->irq);
  247. irq_dispose_mapping(pdata->irq);
  248. res = -ENODEV;
  249. goto err2;
  250. }
  251. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
  252. pdata->irq);
  253. }
  254. devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
  255. edac_dbg(3, "success\n");
  256. printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
  257. return 0;
  258. err2:
  259. edac_pci_del_device(&op->dev);
  260. err:
  261. edac_pci_free_ctl_info(pci);
  262. devres_release_group(&op->dev, mpc85xx_pci_err_probe);
  263. return res;
  264. }
/*
 * Tear down the PCI error device: restore the CAP_DR/EN registers saved
 * at probe, unregister from the EDAC PCI core, and (in interrupt mode)
 * drop the irq mapping before freeing the control structure.
 */
static int mpc85xx_pci_err_remove(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;

	edac_dbg(0, "\n");

	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
		 orig_pci_err_cap_dr);
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);

	edac_pci_del_device(pci->dev);

	if (edac_op_state == EDAC_OPSTATE_INT)
		irq_dispose_mapping(pdata->irq);

	edac_pci_free_ctl_info(pci);

	return 0;
}
/* Device-tree compatibles for PCI/PCI-X bridges with error capture. */
static struct of_device_id mpc85xx_pci_err_of_match[] = {
	{
	 .compatible = "fsl,mpc8540-pcix",
	 },
	{
	 .compatible = "fsl,mpc8540-pci",
	 },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
static struct platform_driver mpc85xx_pci_err_driver = {
	.probe = mpc85xx_pci_err_probe,
	/* __devexit_p() compiles the remove hook out when hotplug is off */
	.remove = __devexit_p(mpc85xx_pci_err_remove),
	.driver = {
		   .name = "mpc85xx_pci_err",
		   .owner = THIS_MODULE,
		   .of_match_table = mpc85xx_pci_err_of_match,
		   },
};
#endif				/* CONFIG_PCI */
  299. /**************************** L2 Err device ***************************/
  300. /************************ L2 SYSFS parts ***********************************/
  301. static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
  302. *edac_dev, char *data)
  303. {
  304. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  305. return sprintf(data, "0x%08x",
  306. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
  307. }
  308. static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
  309. *edac_dev, char *data)
  310. {
  311. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  312. return sprintf(data, "0x%08x",
  313. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
  314. }
  315. static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
  316. *edac_dev, char *data)
  317. {
  318. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  319. return sprintf(data, "0x%08x",
  320. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
  321. }
  322. static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
  323. *edac_dev, const char *data,
  324. size_t count)
  325. {
  326. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  327. if (isdigit(*data)) {
  328. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
  329. simple_strtoul(data, NULL, 0));
  330. return count;
  331. }
  332. return 0;
  333. }
  334. static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
  335. *edac_dev, const char *data,
  336. size_t count)
  337. {
  338. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  339. if (isdigit(*data)) {
  340. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
  341. simple_strtoul(data, NULL, 0));
  342. return count;
  343. }
  344. return 0;
  345. }
  346. static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
  347. *edac_dev, const char *data,
  348. size_t count)
  349. {
  350. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  351. if (isdigit(*data)) {
  352. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
  353. simple_strtoul(data, NULL, 0));
  354. return count;
  355. }
  356. return 0;
  357. }
/* L2 error-injection attributes; NULL-named entry terminates the list. */
static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
	{
	 .attr = {
		  .name = "inject_data_hi",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_hi_show,
	 .store = mpc85xx_l2_inject_data_hi_store},
	{
	 .attr = {
		  .name = "inject_data_lo",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_lo_show,
	 .store = mpc85xx_l2_inject_data_lo_store},
	{
	 .attr = {
		  .name = "inject_ctrl",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_ctrl_show,
	 .store = mpc85xx_l2_inject_ctrl_store},

	/* End of list */
	{
	 .attr = {.name = NULL}
	 }
};
/* Hand the L2 injection attribute list to the edac_device core. */
static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
					    *edac_dev)
{
	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}
  390. /***************************** L2 ops ***********************************/
/*
 * Inspect the L2 error-detect register and report latched ECC errors.
 *
 * Dumps the capture registers, acknowledges the latch, then reports a
 * correctable and/or uncorrectable event to the edac_device core
 * depending on which bits of ERRDET are set.
 */
static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return;

	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));

	/* clear error detect register */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);

	if (err_detect & L2_EDE_CE_MASK)
		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);

	if (err_detect & L2_EDE_UE_MASK)
		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
  417. static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
  418. {
  419. struct edac_device_ctl_info *edac_dev = dev_id;
  420. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  421. u32 err_detect;
  422. err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
  423. if (!(err_detect & L2_EDE_MASK))
  424. return IRQ_NONE;
  425. mpc85xx_l2_check(edac_dev);
  426. return IRQ_HANDLED;
  427. }
  428. static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
  429. {
  430. struct edac_device_ctl_info *edac_dev;
  431. struct mpc85xx_l2_pdata *pdata;
  432. struct resource r;
  433. int res;
  434. if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
  435. return -ENOMEM;
  436. edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
  437. "cpu", 1, "L", 1, 2, NULL, 0,
  438. edac_dev_idx);
  439. if (!edac_dev) {
  440. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  441. return -ENOMEM;
  442. }
  443. pdata = edac_dev->pvt_info;
  444. pdata->name = "mpc85xx_l2_err";
  445. pdata->irq = NO_IRQ;
  446. edac_dev->dev = &op->dev;
  447. dev_set_drvdata(edac_dev->dev, edac_dev);
  448. edac_dev->ctl_name = pdata->name;
  449. edac_dev->dev_name = pdata->name;
  450. res = of_address_to_resource(op->dev.of_node, 0, &r);
  451. if (res) {
  452. printk(KERN_ERR "%s: Unable to get resource for "
  453. "L2 err regs\n", __func__);
  454. goto err;
  455. }
  456. /* we only need the error registers */
  457. r.start += 0xe00;
  458. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  459. pdata->name)) {
  460. printk(KERN_ERR "%s: Error while requesting mem region\n",
  461. __func__);
  462. res = -EBUSY;
  463. goto err;
  464. }
  465. pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  466. if (!pdata->l2_vbase) {
  467. printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
  468. res = -ENOMEM;
  469. goto err;
  470. }
  471. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
  472. orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
  473. /* clear the err_dis */
  474. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
  475. edac_dev->mod_name = EDAC_MOD_STR;
  476. if (edac_op_state == EDAC_OPSTATE_POLL)
  477. edac_dev->edac_check = mpc85xx_l2_check;
  478. mpc85xx_set_l2_sysfs_attributes(edac_dev);
  479. pdata->edac_idx = edac_dev_idx++;
  480. if (edac_device_add_device(edac_dev) > 0) {
  481. edac_dbg(3, "failed edac_device_add_device()\n");
  482. goto err;
  483. }
  484. if (edac_op_state == EDAC_OPSTATE_INT) {
  485. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  486. res = devm_request_irq(&op->dev, pdata->irq,
  487. mpc85xx_l2_isr, IRQF_DISABLED,
  488. "[EDAC] L2 err", edac_dev);
  489. if (res < 0) {
  490. printk(KERN_ERR
  491. "%s: Unable to requiest irq %d for "
  492. "MPC85xx L2 err\n", __func__, pdata->irq);
  493. irq_dispose_mapping(pdata->irq);
  494. res = -ENODEV;
  495. goto err2;
  496. }
  497. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
  498. pdata->irq);
  499. edac_dev->op_state = OP_RUNNING_INTERRUPT;
  500. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
  501. }
  502. devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
  503. edac_dbg(3, "success\n");
  504. printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
  505. return 0;
  506. err2:
  507. edac_device_del_device(&op->dev);
  508. err:
  509. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  510. edac_device_free_ctl_info(edac_dev);
  511. return res;
  512. }
/*
 * Tear down the L2 EDAC device: mask error interrupts (interrupt mode),
 * restore the error-disable mask saved at probe, then unregister and
 * free the control structure.
 */
static int mpc85xx_l2_err_remove(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* quiesce L2 error interrupts before dropping the mapping */
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
	edac_device_del_device(&op->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}
/* Device-tree compatibles for L2 cache controllers with error reporting. */
static struct of_device_id mpc85xx_l2_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-l2-cache-controller", },
	{ .compatible = "fsl,8541-l2-cache-controller", },
	{ .compatible = "fsl,8544-l2-cache-controller", },
	{ .compatible = "fsl,8548-l2-cache-controller", },
	{ .compatible = "fsl,8555-l2-cache-controller", },
	{ .compatible = "fsl,8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8569-l2-cache-controller", },
	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
	{ .compatible = "fsl,p1020-l2-cache-controller", },
	{ .compatible = "fsl,p1021-l2-cache-controller", },
	{ .compatible = "fsl,p2020-l2-cache-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
	.probe = mpc85xx_l2_err_probe,
	.remove = mpc85xx_l2_err_remove,
	.driver = {
		   .name = "mpc85xx_l2_err",
		   .owner = THIS_MODULE,
		   .of_match_table = mpc85xx_l2_err_of_match,
		   },
};
/**************************** MC Err device ***************************/
/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 *
 * Entries [2i] and [2i+1] are the high and low halves of the mask for
 * syndrome bit (7 - i); parity of the masked data bits yields that
 * syndrome bit.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};
  579. /*
  580. * Calculate the correct ECC value for a 64-bit value specified by high:low
  581. */
  582. static u8 calculate_ecc(u32 high, u32 low)
  583. {
  584. u32 mask_low;
  585. u32 mask_high;
  586. int bit_cnt;
  587. u8 ecc = 0;
  588. int i;
  589. int j;
  590. for (i = 0; i < 8; i++) {
  591. mask_high = ecc_table[i * 2];
  592. mask_low = ecc_table[i * 2 + 1];
  593. bit_cnt = 0;
  594. for (j = 0; j < 32; j++) {
  595. if ((mask_high >> j) & 1)
  596. bit_cnt ^= (high >> j) & 1;
  597. if ((mask_low >> j) & 1)
  598. bit_cnt ^= (low >> j) & 1;
  599. }
  600. ecc |= bit_cnt << i;
  601. }
  602. return ecc;
  603. }
/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed.  Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
 * User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit) {
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 *
	 * The loop starts at i = 1 (low-word table entries) when bit < 32,
	 * and at i = 0 (high-word entries) otherwise, stepping by 2 so only
	 * the relevant half of each syndrome column is sampled; i / 2
	 * selects which syndrome bit the sampled value contributes to.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}
  621. /*
  622. * Decode data and ecc syndrome to determine what went wrong
  623. * Note: This can only decode single-bit errors
  624. */
  625. static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
  626. int *bad_data_bit, int *bad_ecc_bit)
  627. {
  628. int i;
  629. u8 syndrome;
  630. *bad_data_bit = -1;
  631. *bad_ecc_bit = -1;
  632. /*
  633. * Calculate the ECC of the captured data and XOR it with the captured
  634. * ECC to find an ECC syndrome value we can search for
  635. */
  636. syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
  637. /* Check if a data line is stuck... */
  638. for (i = 0; i < 64; i++) {
  639. if (syndrome == syndrome_from_bit(i)) {
  640. *bad_data_bit = i;
  641. return;
  642. }
  643. }
  644. /* If data is correct, check ECC bits for errors... */
  645. for (i = 0; i < 8; i++) {
  646. if ((syndrome >> i) & 0x1) {
  647. *bad_ecc_bit = i;
  648. return;
  649. }
  650. }
  651. }
  652. static void mpc85xx_mc_check(struct mem_ctl_info *mci)
  653. {
  654. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  655. struct csrow_info *csrow;
  656. u32 bus_width;
  657. u32 err_detect;
  658. u32 syndrome;
  659. u32 err_addr;
  660. u32 pfn;
  661. int row_index;
  662. u32 cap_high;
  663. u32 cap_low;
  664. int bad_data_bit;
  665. int bad_ecc_bit;
  666. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  667. if (!err_detect)
  668. return;
  669. mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
  670. err_detect);
  671. /* no more processing if not ECC bit errors */
  672. if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
  673. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  674. return;
  675. }
  676. syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
  677. /* Mask off appropriate bits of syndrome based on bus width */
  678. bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
  679. DSC_DBW_MASK) ? 32 : 64;
  680. if (bus_width == 64)
  681. syndrome &= 0xff;
  682. else
  683. syndrome &= 0xffff;
  684. err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
  685. pfn = err_addr >> PAGE_SHIFT;
  686. for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
  687. csrow = mci->csrows[row_index];
  688. if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
  689. break;
  690. }
  691. cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
  692. cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
  693. /*
  694. * Analyze single-bit errors on 64-bit wide buses
  695. * TODO: Add support for 32-bit wide buses
  696. */
  697. if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
  698. sbe_ecc_decode(cap_high, cap_low, syndrome,
  699. &bad_data_bit, &bad_ecc_bit);
  700. if (bad_data_bit != -1)
  701. mpc85xx_mc_printk(mci, KERN_ERR,
  702. "Faulty Data bit: %d\n", bad_data_bit);
  703. if (bad_ecc_bit != -1)
  704. mpc85xx_mc_printk(mci, KERN_ERR,
  705. "Faulty ECC bit: %d\n", bad_ecc_bit);
  706. mpc85xx_mc_printk(mci, KERN_ERR,
  707. "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  708. cap_high ^ (1 << (bad_data_bit - 32)),
  709. cap_low ^ (1 << bad_data_bit),
  710. syndrome ^ (1 << bad_ecc_bit));
  711. }
  712. mpc85xx_mc_printk(mci, KERN_ERR,
  713. "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  714. cap_high, cap_low, syndrome);
  715. mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
  716. mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
  717. /* we are out of range */
  718. if (row_index == mci->nr_csrows)
  719. mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
  720. if (err_detect & DDR_EDE_SBE)
  721. edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
  722. pfn, err_addr & ~PAGE_MASK, syndrome,
  723. row_index, 0, -1,
  724. mci->ctl_name, "");
  725. if (err_detect & DDR_EDE_MBE)
  726. edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
  727. pfn, err_addr & ~PAGE_MASK, syndrome,
  728. row_index, 0, -1,
  729. mci->ctl_name, "");
  730. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  731. }
  732. static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
  733. {
  734. struct mem_ctl_info *mci = dev_id;
  735. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  736. u32 err_detect;
  737. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  738. if (!err_detect)
  739. return IRQ_NONE;
  740. mpc85xx_mc_check(mci);
  741. return IRQ_HANDLED;
  742. }
  743. static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
  744. {
  745. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  746. struct csrow_info *csrow;
  747. struct dimm_info *dimm;
  748. u32 sdram_ctl;
  749. u32 sdtype;
  750. enum mem_type mtype;
  751. u32 cs_bnds;
  752. int index;
  753. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  754. sdtype = sdram_ctl & DSC_SDTYPE_MASK;
  755. if (sdram_ctl & DSC_RD_EN) {
  756. switch (sdtype) {
  757. case DSC_SDTYPE_DDR:
  758. mtype = MEM_RDDR;
  759. break;
  760. case DSC_SDTYPE_DDR2:
  761. mtype = MEM_RDDR2;
  762. break;
  763. case DSC_SDTYPE_DDR3:
  764. mtype = MEM_RDDR3;
  765. break;
  766. default:
  767. mtype = MEM_UNKNOWN;
  768. break;
  769. }
  770. } else {
  771. switch (sdtype) {
  772. case DSC_SDTYPE_DDR:
  773. mtype = MEM_DDR;
  774. break;
  775. case DSC_SDTYPE_DDR2:
  776. mtype = MEM_DDR2;
  777. break;
  778. case DSC_SDTYPE_DDR3:
  779. mtype = MEM_DDR3;
  780. break;
  781. default:
  782. mtype = MEM_UNKNOWN;
  783. break;
  784. }
  785. }
  786. for (index = 0; index < mci->nr_csrows; index++) {
  787. u32 start;
  788. u32 end;
  789. csrow = mci->csrows[index];
  790. dimm = csrow->channels[0]->dimm;
  791. cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
  792. (index * MPC85XX_MC_CS_BNDS_OFS));
  793. start = (cs_bnds & 0xffff0000) >> 16;
  794. end = (cs_bnds & 0x0000ffff);
  795. if (start == end)
  796. continue; /* not populated */
  797. start <<= (24 - PAGE_SHIFT);
  798. end <<= (24 - PAGE_SHIFT);
  799. end |= (1 << (24 - PAGE_SHIFT)) - 1;
  800. csrow->first_page = start;
  801. csrow->last_page = end;
  802. dimm->nr_pages = end + 1 - start;
  803. dimm->grain = 8;
  804. dimm->mtype = mtype;
  805. dimm->dtype = DEV_UNKNOWN;
  806. if (sdram_ctl & DSC_X32_EN)
  807. dimm->dtype = DEV_X32;
  808. dimm->edac_mode = EDAC_SECDED;
  809. }
  810. }
  811. static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
  812. {
  813. struct mem_ctl_info *mci;
  814. struct edac_mc_layer layers[2];
  815. struct mpc85xx_mc_pdata *pdata;
  816. struct resource r;
  817. u32 sdram_ctl;
  818. int res;
  819. if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
  820. return -ENOMEM;
  821. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  822. layers[0].size = 4;
  823. layers[0].is_virt_csrow = true;
  824. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  825. layers[1].size = 1;
  826. layers[1].is_virt_csrow = false;
  827. mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
  828. sizeof(*pdata));
  829. if (!mci) {
  830. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  831. return -ENOMEM;
  832. }
  833. pdata = mci->pvt_info;
  834. pdata->name = "mpc85xx_mc_err";
  835. pdata->irq = NO_IRQ;
  836. mci->pdev = &op->dev;
  837. pdata->edac_idx = edac_mc_idx++;
  838. dev_set_drvdata(mci->pdev, mci);
  839. mci->ctl_name = pdata->name;
  840. mci->dev_name = pdata->name;
  841. res = of_address_to_resource(op->dev.of_node, 0, &r);
  842. if (res) {
  843. printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
  844. __func__);
  845. goto err;
  846. }
  847. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  848. pdata->name)) {
  849. printk(KERN_ERR "%s: Error while requesting mem region\n",
  850. __func__);
  851. res = -EBUSY;
  852. goto err;
  853. }
  854. pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  855. if (!pdata->mc_vbase) {
  856. printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
  857. res = -ENOMEM;
  858. goto err;
  859. }
  860. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  861. if (!(sdram_ctl & DSC_ECC_EN)) {
  862. /* no ECC */
  863. printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
  864. res = -ENODEV;
  865. goto err;
  866. }
  867. edac_dbg(3, "init mci\n");
  868. mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
  869. MEM_FLAG_DDR | MEM_FLAG_DDR2;
  870. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  871. mci->edac_cap = EDAC_FLAG_SECDED;
  872. mci->mod_name = EDAC_MOD_STR;
  873. mci->mod_ver = MPC85XX_REVISION;
  874. if (edac_op_state == EDAC_OPSTATE_POLL)
  875. mci->edac_check = mpc85xx_mc_check;
  876. mci->ctl_page_to_phys = NULL;
  877. mci->scrub_mode = SCRUB_SW_SRC;
  878. mpc85xx_init_csrows(mci);
  879. /* store the original error disable bits */
  880. orig_ddr_err_disable =
  881. in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
  882. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
  883. /* clear all error bits */
  884. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
  885. if (edac_mc_add_mc(mci)) {
  886. edac_dbg(3, "failed edac_mc_add_mc()\n");
  887. goto err;
  888. }
  889. if (mpc85xx_create_sysfs_attributes(mci)) {
  890. edac_mc_del_mc(mci->pdev);
  891. edac_dbg(3, "failed edac_mc_add_mc()\n");
  892. goto err;
  893. }
  894. if (edac_op_state == EDAC_OPSTATE_INT) {
  895. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
  896. DDR_EIE_MBEE | DDR_EIE_SBEE);
  897. /* store the original error management threshold */
  898. orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
  899. MPC85XX_MC_ERR_SBE) & 0xff0000;
  900. /* set threshold to 1 error per interrupt */
  901. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
  902. /* register interrupts */
  903. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  904. res = devm_request_irq(&op->dev, pdata->irq,
  905. mpc85xx_mc_isr,
  906. IRQF_DISABLED | IRQF_SHARED,
  907. "[EDAC] MC err", mci);
  908. if (res < 0) {
  909. printk(KERN_ERR "%s: Unable to request irq %d for "
  910. "MPC85xx DRAM ERR\n", __func__, pdata->irq);
  911. irq_dispose_mapping(pdata->irq);
  912. res = -ENODEV;
  913. goto err2;
  914. }
  915. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
  916. pdata->irq);
  917. }
  918. devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
  919. edac_dbg(3, "success\n");
  920. printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
  921. return 0;
  922. err2:
  923. edac_mc_del_mc(&op->dev);
  924. err:
  925. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  926. edac_mc_free(mci);
  927. return res;
  928. }
/*
 * Tear down the memory-controller EDAC instance.
 *
 * Order matters here: in interrupt mode the error interrupt is masked
 * and its mapping disposed first, then the error-disable and SBE
 * threshold registers saved at probe time are restored, and only then
 * is the mci unregistered from the EDAC core and freed.
 */
static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	/* stop further error interrupts before unwinding anything else */
	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	/* restore the original error management settings */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		 orig_ddr_err_disable);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	mpc85xx_remove_sysfs_attributes(mci);
	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}
  946. static struct of_device_id mpc85xx_mc_err_of_match[] = {
  947. /* deprecate the fsl,85.. forms in the future, 2.6.30? */
  948. { .compatible = "fsl,8540-memory-controller", },
  949. { .compatible = "fsl,8541-memory-controller", },
  950. { .compatible = "fsl,8544-memory-controller", },
  951. { .compatible = "fsl,8548-memory-controller", },
  952. { .compatible = "fsl,8555-memory-controller", },
  953. { .compatible = "fsl,8568-memory-controller", },
  954. { .compatible = "fsl,mpc8536-memory-controller", },
  955. { .compatible = "fsl,mpc8540-memory-controller", },
  956. { .compatible = "fsl,mpc8541-memory-controller", },
  957. { .compatible = "fsl,mpc8544-memory-controller", },
  958. { .compatible = "fsl,mpc8548-memory-controller", },
  959. { .compatible = "fsl,mpc8555-memory-controller", },
  960. { .compatible = "fsl,mpc8560-memory-controller", },
  961. { .compatible = "fsl,mpc8568-memory-controller", },
  962. { .compatible = "fsl,mpc8569-memory-controller", },
  963. { .compatible = "fsl,mpc8572-memory-controller", },
  964. { .compatible = "fsl,mpc8349-memory-controller", },
  965. { .compatible = "fsl,p1020-memory-controller", },
  966. { .compatible = "fsl,p1021-memory-controller", },
  967. { .compatible = "fsl,p2020-memory-controller", },
  968. { .compatible = "fsl,qoriq-memory-controller", },
  969. {},
  970. };
  971. MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
/* Platform driver glue binding the OF match table to probe/remove. */
static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		.name = "mpc85xx_mc_err",
		.owner = THIS_MODULE,
		.of_match_table = mpc85xx_mc_err_of_match,
	},
};
  981. #ifdef CONFIG_FSL_SOC_BOOKE
  982. static void __init mpc85xx_mc_clear_rfxe(void *data)
  983. {
  984. orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
  985. mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
  986. }
  987. #endif
  988. static int __init mpc85xx_mc_init(void)
  989. {
  990. int res = 0;
  991. u32 pvr = 0;
  992. printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
  993. "(C) 2006 Montavista Software\n");
  994. /* make sure error reporting method is sane */
  995. switch (edac_op_state) {
  996. case EDAC_OPSTATE_POLL:
  997. case EDAC_OPSTATE_INT:
  998. break;
  999. default:
  1000. edac_op_state = EDAC_OPSTATE_INT;
  1001. break;
  1002. }
  1003. res = platform_driver_register(&mpc85xx_mc_err_driver);
  1004. if (res)
  1005. printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
  1006. res = platform_driver_register(&mpc85xx_l2_err_driver);
  1007. if (res)
  1008. printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
  1009. #ifdef CONFIG_PCI
  1010. res = platform_driver_register(&mpc85xx_pci_err_driver);
  1011. if (res)
  1012. printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
  1013. #endif
  1014. #ifdef CONFIG_FSL_SOC_BOOKE
  1015. pvr = mfspr(SPRN_PVR);
  1016. if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
  1017. (PVR_VER(pvr) == PVR_VER_E500V2)) {
  1018. /*
  1019. * need to clear HID1[RFXE] to disable machine check int
  1020. * so we can catch it
  1021. */
  1022. if (edac_op_state == EDAC_OPSTATE_INT)
  1023. on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
  1024. }
  1025. #endif
  1026. return 0;
  1027. }
  1028. module_init(mpc85xx_mc_init);
  1029. #ifdef CONFIG_FSL_SOC_BOOKE
  1030. static void __exit mpc85xx_mc_restore_hid1(void *data)
  1031. {
  1032. mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
  1033. }
  1034. #endif
  1035. static void __exit mpc85xx_mc_exit(void)
  1036. {
  1037. #ifdef CONFIG_FSL_SOC_BOOKE
  1038. u32 pvr = mfspr(SPRN_PVR);
  1039. if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
  1040. (PVR_VER(pvr) == PVR_VER_E500V2)) {
  1041. on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
  1042. }
  1043. #endif
  1044. #ifdef CONFIG_PCI
  1045. platform_driver_unregister(&mpc85xx_pci_err_driver);
  1046. #endif
  1047. platform_driver_unregister(&mpc85xx_l2_err_driver);
  1048. platform_driver_unregister(&mpc85xx_mc_err_driver);
  1049. }
  1050. module_exit(mpc85xx_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
/* Reporting mode is selectable at load time; read-only via sysfs (0444). */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");