mpc85xx_edac.c

/*
 * Freescale MPC85xx Memory Controller kernel module
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"

static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/*
 * PCI Err defines
 */
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif

static u32 orig_l2_err_disable;
#ifdef CONFIG_FSL_SOC_BOOKE
static u32 orig_hid1[2];
#endif
/************************ MC SYSFS parts ***********************************/

static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
					      char *data)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_HI));
}

static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
					      char *data)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_LO));
}

static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
}

static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
					       const char *data, size_t count)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
					       const char *data, size_t count)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
					    const char *data, size_t count)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
	{
	 .attr = {
		  .name = "inject_data_hi",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_mc_inject_data_hi_show,
	 .store = mpc85xx_mc_inject_data_hi_store},
	{
	 .attr = {
		  .name = "inject_data_lo",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_mc_inject_data_lo_show,
	 .store = mpc85xx_mc_inject_data_lo_store},
	{
	 .attr = {
		  .name = "inject_ctrl",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_mc_inject_ctrl_show,
	 .store = mpc85xx_mc_inject_ctrl_store},

	/* End of list */
	{
	 .attr = {.name = NULL}
	 }
};

static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
}
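/*
 * Usage note (hedged, not part of the original source): with the EDAC
 * sysfs layout used by kernels of this generation, the attributes above
 * are expected to appear in the memory controller's sysfs directory,
 * typically /sys/devices/system/edac/mc/mc<N>/ -- the exact path may
 * differ between kernel versions. Writing numeric values to
 * inject_data_hi and inject_data_lo programs the data-path error
 * injection mask registers, and writing to inject_ctrl programs
 * MPC85XX_MC_ECC_ERR_INJECT, as implemented in the store handlers above.
 */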
/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI

static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	/* master aborts can happen during PCI config cycles */
	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
		return;
	}

	printk(KERN_ERR "PCI error(s) detected\n");
	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);

	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);

	if (err_detect & PCI_EDE_PERR_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
		edac_pci_handle_npe(pci, pci->ctl_name);
}

static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_pci_check(pci);

	return IRQ_HANDLED;
}

static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci;
	struct mpc85xx_pci_pdata *pdata;
	struct resource r;
	int res = 0;

	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;
	pdata->name = "mpc85xx_pci_err";
	pdata->irq = NO_IRQ;
	dev_set_drvdata(&op->dev, pci);
	pci->dev = &op->dev;
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;
	pci->dev_name = dev_name(&op->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mpc85xx_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	orig_pci_err_cap_dr =
	    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);

	/* PCI master abort is expected during config cycles */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);

	orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);

	/* disable master abort reporting */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		debugf3("%s(): failed edac_pci_add_device()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_pci_isr, IRQF_DISABLED,
				       "[EDAC] PCI err", pci);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx PCI err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");

	return 0;

err2:
	edac_pci_del_device(&op->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
	return res;
}

static int mpc85xx_pci_err_remove(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;

	debugf0("%s()\n", __func__);

	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
		 orig_pci_err_cap_dr);

	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);

	edac_pci_del_device(pci->dev);

	if (edac_op_state == EDAC_OPSTATE_INT)
		irq_dispose_mapping(pdata->irq);

	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct of_device_id mpc85xx_pci_err_of_match[] = {
	{
	 .compatible = "fsl,mpc8540-pcix",
	 },
	{
	 .compatible = "fsl,mpc8540-pci",
	 },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);

static struct platform_driver mpc85xx_pci_err_driver = {
	.probe = mpc85xx_pci_err_probe,
	.remove = __devexit_p(mpc85xx_pci_err_remove),
	.driver = {
		   .name = "mpc85xx_pci_err",
		   .owner = THIS_MODULE,
		   .of_match_table = mpc85xx_pci_err_of_match,
		   },
};

#endif				/* CONFIG_PCI */
/**************************** L2 Err device ***************************/

/************************ L2 SYSFS parts ***********************************/

static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
}

static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
}

static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
					   *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
}

static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
					    *edac_dev, const char *data,
					    size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
	{
	 .attr = {
		  .name = "inject_data_hi",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_hi_show,
	 .store = mpc85xx_l2_inject_data_hi_store},
	{
	 .attr = {
		  .name = "inject_data_lo",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_lo_show,
	 .store = mpc85xx_l2_inject_data_lo_store},
	{
	 .attr = {
		  .name = "inject_ctrl",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_ctrl_show,
	 .store = mpc85xx_l2_inject_ctrl_store},

	/* End of list */
	{
	 .attr = {.name = NULL}
	 }
};

static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
					    *edac_dev)
{
	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}
/***************************** L2 ops ***********************************/

static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return;

	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));

	/* clear error detect register */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);

	if (err_detect & L2_EDE_CE_MASK)
		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);

	if (err_detect & L2_EDE_UE_MASK)
		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return IRQ_NONE;

	mpc85xx_l2_check(edac_dev);

	return IRQ_HANDLED;
}

static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev;
	struct mpc85xx_l2_pdata *pdata;
	struct resource r;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, "L", 1, 2, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&op->dev, mpc85xx_l2_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mpc85xx_l2_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &op->dev;
	dev_set_drvdata(edac_dev->dev, edac_dev);
	edac_dev->ctl_name = pdata->name;
	edac_dev->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "L2 err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->l2_vbase) {
		printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);

	orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);

	/* clear the err_dis */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);

	edac_dev->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mpc85xx_l2_check;

	mpc85xx_set_l2_sysfs_attributes(edac_dev);

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		debugf3("%s(): failed edac_device_add_device()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_l2_isr,
				       IRQF_DISABLED | IRQF_SHARED,
				       "[EDAC] L2 err", edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx L2 err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
		       pdata->irq);

		edac_dev->op_state = OP_RUNNING_INTERRUPT;

		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
	}

	devres_remove_group(&op->dev, mpc85xx_l2_err_probe);

	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");

	return 0;

err2:
	edac_device_del_device(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_l2_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mpc85xx_l2_err_remove(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;

	debugf0("%s()\n", __func__);

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
	edac_device_del_device(&op->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct of_device_id mpc85xx_l2_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-l2-cache-controller", },
	{ .compatible = "fsl,8541-l2-cache-controller", },
	{ .compatible = "fsl,8544-l2-cache-controller", },
	{ .compatible = "fsl,8548-l2-cache-controller", },
	{ .compatible = "fsl,8555-l2-cache-controller", },
	{ .compatible = "fsl,8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8569-l2-cache-controller", },
	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
	{ .compatible = "fsl,p1020-l2-cache-controller", },
	{ .compatible = "fsl,p1021-l2-cache-controller", },
	{ .compatible = "fsl,p2020-l2-cache-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);

static struct platform_driver mpc85xx_l2_err_driver = {
	.probe = mpc85xx_l2_err_probe,
	.remove = mpc85xx_l2_err_remove,
	.driver = {
		   .name = "mpc85xx_l2_err",
		   .owner = THIS_MODULE,
		   .of_match_table = mpc85xx_l2_err_of_match,
		   },
};
/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}
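/*
 * Worked example for calculate_ecc(), using illustrative values rather
 * than a real capture: each ECC bit i is the XOR-parity of the data bits
 * selected by ecc_table[i * 2] (mask applied to 'high') and
 * ecc_table[i * 2 + 1] (mask applied to 'low'). With high = 0x00000001
 * and low = 0, only bit 0 of 'high' can contribute, so ECC bit i ends up
 * set exactly when bit 0 of ecc_table[i * 2] is set.
 */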
/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed.  E.g. generate the 8-bit codes seen in Table 8-55 in the
 * MPC8641 User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}
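/*
 * Note on the loop bounds above: ecc_table[] interleaves high-word and
 * low-word masks, so even indices hold the masks for the upper 32 bits
 * of the 64-bit data and odd indices the masks for the lower 32 bits.
 * Starting the index at (bit < 32) picks the odd entries for bits 0-31
 * and the even entries for bits 32-63; stepping by 2 walks the eight
 * syndrome columns, and (i / 2) recovers the syndrome bit position.
 */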
/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
			   int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}
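/*
 * The decode above relies on the single-error property of the SECDED
 * code: a flipped data line reproduces the full syndrome column for that
 * bit (which is what syndrome_from_bit() reconstructs), whereas a
 * flipped check bit only shows up in its own syndrome position. Data
 * lines are therefore tested first; if none match, the first set
 * syndrome bit is reported as the faulty ECC bit.
 */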
static void mpc85xx_mc_check(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u32 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return;

	mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
			  err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
	pfn = err_addr >> PAGE_SHIFT;

	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = &mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
	cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		sbe_ecc_decode(cap_high, cap_low, syndrome,
			       &bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
				"Faulty Data bit: %d\n", bad_data_bit);
		if (bad_ecc_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
				"Faulty ECC bit: %d\n", bad_ecc_bit);

		mpc85xx_mc_printk(mci, KERN_ERR,
			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high ^ (1 << (bad_data_bit - 32)),
			cap_low ^ (1 << bad_data_bit),
			syndrome ^ (1 << bad_ecc_bit));
	}

	mpc85xx_mc_printk(mci, KERN_ERR,
			  "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			  cap_high, cap_low, syndrome);
	mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
	mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
				  syndrome, row_index, 0, mci->ctl_name);

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
				  row_index, mci->ctl_name);

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}

static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_mc_check(mci);

	return IRQ_HANDLED;
}
static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_RDDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_RDDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_RDDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_DDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_DDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_DDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = &mci->csrows[index];
		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
				  (index * MPC85XX_MC_CS_BNDS_OFS));

		start = (cs_bnds & 0xffff0000) >> 16;
		end   = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end   <<= (24 - PAGE_SHIFT);
		end    |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;
		csrow->nr_pages = end + 1 - start;
		csrow->grain = 8;
		csrow->mtype = mtype;
		csrow->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			csrow->dtype = DEV_X32;
		csrow->edac_mode = EDAC_SECDED;
	}
}
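/*
 * Note on the CS_BNDS decoding above: the start and end fields read from
 * each chip-select bounds register are expressed in 16 MB units, which
 * is why they are shifted by (24 - PAGE_SHIFT) to turn them into page
 * frame numbers, and a chip select whose start equals its end is skipped
 * as unpopulated.
 */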
static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct mpc85xx_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
	if (!mci) {
		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "mpc85xx_mc_err";
	pdata->irq = NO_IRQ;
	mci->dev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->dev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
	    MEM_FLAG_DDR | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MPC85XX_REVISION;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mpc85xx_mc_check;

	mci->ctl_page_to_phys = NULL;
	mci->scrub_mode = SCRUB_SW_SRC;

	mpc85xx_set_mc_sysfs_attributes(mci);

	mpc85xx_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable =
	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);

	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
			 DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
					   MPC85XX_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_mc_isr,
				       IRQF_DISABLED | IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MPC85xx DRAM ERR\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
	debugf3("%s(): success\n", __func__);
	printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	debugf0("%s()\n", __func__);

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		 orig_ddr_err_disable);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}
static struct of_device_id mpc85xx_mc_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-memory-controller", },
	{ .compatible = "fsl,8541-memory-controller", },
	{ .compatible = "fsl,8544-memory-controller", },
	{ .compatible = "fsl,8548-memory-controller", },
	{ .compatible = "fsl,8555-memory-controller", },
	{ .compatible = "fsl,8568-memory-controller", },
	{ .compatible = "fsl,mpc8536-memory-controller", },
	{ .compatible = "fsl,mpc8540-memory-controller", },
	{ .compatible = "fsl,mpc8541-memory-controller", },
	{ .compatible = "fsl,mpc8544-memory-controller", },
	{ .compatible = "fsl,mpc8548-memory-controller", },
	{ .compatible = "fsl,mpc8555-memory-controller", },
	{ .compatible = "fsl,mpc8560-memory-controller", },
	{ .compatible = "fsl,mpc8568-memory-controller", },
	{ .compatible = "fsl,mpc8569-memory-controller", },
	{ .compatible = "fsl,mpc8572-memory-controller", },
	{ .compatible = "fsl,mpc8349-memory-controller", },
	{ .compatible = "fsl,p1020-memory-controller", },
	{ .compatible = "fsl,p1021-memory-controller", },
	{ .compatible = "fsl,p2020-memory-controller", },
	{ .compatible = "fsl,qoriq-memory-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);

static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		   .name = "mpc85xx_mc_err",
		   .owner = THIS_MODULE,
		   .of_match_table = mpc85xx_mc_err_of_match,
		   },
};
#ifdef CONFIG_FSL_SOC_BOOKE
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif

static int __init mpc85xx_mc_init(void)
{
	int res = 0;
	u32 pvr = 0;

	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
	       "(C) 2006 Montavista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	res = platform_driver_register(&mpc85xx_mc_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n");

	res = platform_driver_register(&mpc85xx_l2_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n");

#ifdef CONFIG_PCI
	res = platform_driver_register(&mpc85xx_pci_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n");
#endif

#ifdef CONFIG_FSL_SOC_BOOKE
	pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		/*
		 * need to clear HID1[RFXE] to disable machine check int
		 * so we can catch it
		 */
		if (edac_op_state == EDAC_OPSTATE_INT)
			on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
	}
#endif

	return 0;
}

module_init(mpc85xx_mc_init);

#ifdef CONFIG_FSL_SOC_BOOKE
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
#endif

static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
	u32 pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
	}
#endif
#ifdef CONFIG_PCI
	platform_driver_unregister(&mpc85xx_pci_err_driver);
#endif
	platform_driver_unregister(&mpc85xx_l2_err_driver);
	platform_driver_unregister(&mpc85xx_mc_err_driver);
}

module_exit(mpc85xx_mc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");