e752x_edac.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450
  1. /*
  2. * Intel e752x Memory Controller kernel module
  3. * (C) 2004 Linux Networx (http://lnxi.com)
  4. * This file may be distributed under the terms of the
  5. * GNU General Public License.
  6. *
  7. * See "enum e752x_chips" below for supported chipsets
  8. *
  9. * Written by Tom Zimmerman
  10. *
  11. * Contributors:
  12. * Thayne Harbaugh at realmsys.com (?)
  13. * Wang Zhenyu at intel.com
  14. * Dave Jiang at mvista.com
  15. *
  16. * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
  17. *
  18. */
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/pci.h>
  22. #include <linux/pci_ids.h>
  23. #include <linux/edac.h>
  24. #include "edac_core.h"
#define E752X_REVISION	" Ver: 2.0.2"
#define EDAC_MOD_STR	"e752x_edac"

/* Module parameters / file-scope state */
static int report_non_memory_errors;	/* also log non-DRAM global errors */
static int force_function_unhide;
static int sysbus_parity = -1;		/* -1 = use per-chip default */

static struct edac_pci_ctl_info *e752x_pci;

/* Logging helpers: prefix every message with the "e752x" module tag */
#define e752x_printk(level, fmt, arg...) \
	edac_printk(level, "e752x", fmt, ##arg)

#define e752x_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)

/* Fallback PCI device IDs for kernels whose pci_ids.h lacks them */
#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0	0x3590
#endif				/* PCI_DEVICE_ID_INTEL_7520_0 */

#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
#define PCI_DEVICE_ID_INTEL_7520_1_ERR	0x3591
#endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7525_0
#define PCI_DEVICE_ID_INTEL_7525_0	0x359E
#endif				/* PCI_DEVICE_ID_INTEL_7525_0 */

/*
 * NOTE(review): 7525_1_ERR and 7320_1_ERR below share the value 0x3593 —
 * presumably intentional (shared error-reporting function), but confirm
 * against the chipset datasheets.
 */
#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
#define PCI_DEVICE_ID_INTEL_7525_1_ERR	0x3593
#endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7320_0
#define PCI_DEVICE_ID_INTEL_7320_0	0x3592
#endif				/* PCI_DEVICE_ID_INTEL_7320_0 */

#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
#define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
#endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_3100_0
#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
#endif				/* PCI_DEVICE_ID_INTEL_3100_0 */

#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
#endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */

#define E752X_NR_CSROWS		8	/* number of csrows */

/* E752X register addresses - device 0 function 0 */
#define E752X_MCHSCRB		0x52	/* Memory Scrub register (16b) */
					/*
					 * 6:5	Scrub Completion Count
					 * 3:2	Scrub Rate (i3100 only)
					 *	01=fast 10=normal
					 * 1:0	Scrub Mode enable
					 *	00=off 10=on
					 */
#define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
#define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
					/*
					 * 31:30 Device width row 7
					 *	01=x8 10=x4 11=x8 DDR2
					 * 27:26 Device width row 6
					 * 23:22 Device width row 5
					 * 19:18 Device width row 4
					 *	 (source comment said "19:20" -
					 *	 presumably a typo for 19:18,
					 *	 matching the neighbours)
					 * 15:14 Device width row 3
					 * 11:10 Device width row 2
					 * 7:6	 Device width row 1
					 * 3:2	 Device width row 0
					 */
#define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
					/* FIXME:IS THIS RIGHT? */
					/*
					 * 22	 Number channels 0=1,1=2
					 * 19:18 DRB Granularity 32/64MB
					 */
#define E752X_DRM		0x80	/* Dimm mapping register */
#define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
					/*
					 * 14:12 1 single A, 2 single B, 3 dual
					 */
#define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
#define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
#define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
#define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */

/* E752X register addresses - device 0 function 1
 * (offsets below may coincide with function-0 offsets above; they live in a
 * different PCI function's config space)
 */
#define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
#define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
#define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
#define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
#define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
#define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR	0x60	/* System buss first error reg (16b) */
#define E752X_SYSBUS_NERR	0x62	/* System buss next error reg (16b) */
#define E752X_SYSBUS_ERRMASK	0x64	/* System buss error mask reg (16b) */
#define E752X_SYSBUS_SMICMD	0x6A	/* System buss SMI command reg (16b) */
#define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
#define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
#define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
#define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
#define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
#define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
#define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
#define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
#define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
					/* error address register (32b) */
					/*
					 * 31	 Reserved
					 * 30:2	 CE address (64 byte block 34:6)
					 * 1	 Reserved
					 * 0	 HiLoCS
					 */
#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
					/* error address register (32b) */
					/*
					 * 31	 Reserved
					 * 30:2	 CE address (64 byte block 34:6)
					 * 1	 Reserved
					 * 0	 HiLoCS
					 */
#define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
					/* error address register (32b) */
					/*
					 * 31	 Reserved
					 * 30:2	 CE address (64 byte block 34:6)
					 * 1	 Reserved
					 * 0	 HiLoCS
					 */
#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
					/* error address register (32b) */
					/*
					 * 31	 Reserved
					 * 30:2	 CE address (64 byte block 34:6)
					 * 1	 Reserved
					 * 0	 HiLoCS
					 */
#define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
					/* error syndrome register (16b) */
#define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
					/* error syndrome register (16b) */
#define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */

/* 3100 IMCH specific register addresses - device 0 function 1 */
#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */

/* ICH5R register addresses - device 30 function 0 */
#define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
#define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
#define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
/* Supported chipsets; values index e752x_devs[] below */
enum e752x_chips {
	E7520 = 0,
	E7525 = 1,
	E7320 = 2,
	I3100 = 3
};

/* Per-instance driver state, stored in mci->pvt_info */
struct e752x_pvt {
	struct pci_dev *bridge_ck;
	struct pci_dev *dev_d0f0;	/* device 0 function 0 (control) */
	struct pci_dev *dev_d0f1;	/* device 0 function 1 (error regs) */
	u32 tolm;			/* top of low memory */
	u32 remapbase;
	u32 remaplimit;
	int mc_symmetric;		/* non-zero: symmetric (interleaved) mode */
	u8 map[8];			/* csrow remap table (symmetric mode) */
	int map_type;
	const struct e752x_dev_info *dev_info;
};

/* Static description of one supported chipset */
struct e752x_dev_info {
	u16 err_dev;		/* PCI ID of the error-reporting function */
	u16 ctl_dev;		/* PCI ID of the control function */
	const char *ctl_name;
};

/* Snapshot of all error registers, filled by e752x_get_error_info() */
struct e752x_error_info {
	u32 ferr_global;
	u32 nerr_global;
	u32 nsi_ferr;		/* 3100 only */
	u32 nsi_nerr;		/* 3100 only */
	u8 hi_ferr;		/* all but 3100 */
	u8 hi_nerr;		/* all but 3100 */
	u16 sysbus_ferr;
	u16 sysbus_nerr;
	u8 buf_ferr;
	u8 buf_nerr;
	u16 dram_ferr;
	u16 dram_nerr;
	u32 dram_sec1_add;
	u32 dram_sec2_add;
	u16 dram_sec1_syndrome;
	u16 dram_sec2_syndrome;
	u32 dram_ded_add;
	u32 dram_scrb_add;
	u32 dram_retr_add;
};

/* Table indexed by enum e752x_chips */
static const struct e752x_dev_info e752x_devs[] = {
	[E7520] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
		.ctl_name = "E7520"},
	[E7525] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
		.ctl_name = "E7525"},
	[E7320] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
		.ctl_name = "E7320"},
	[I3100] = {
		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
		.ctl_name = "3100"},
};

/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
 * map the scrubbing bandwidth to a hardware register value. The 'set'
 * operation finds the 'matching or higher value'. Note that scrubbing
 * on the e752x can only be enabled/disabled. The 3100 supports
 * a normal and fast mode.
 */
#define SDRATE_EOT	0xFFFFFFFF	/* end-of-table sentinel bandwidth */

struct scrubrate {
	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
	u16 scrubval;	/* register value for scrub rate */
};

/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
 * normal mode. e752x bridges don't support choosing normal or fast mode,
 * so the scrubbing bandwidth value isn't all that important - scrubbing is
 * either on or off.
 */
static const struct scrubrate scrubrates_e752x[] = {
	{0, 0x00},		/* Scrubbing Off */
	{500000, 0x02},		/* Scrubbing On */
	{SDRATE_EOT, 0x00}	/* End of Table */
};

/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
 * Normal mode: 125 (32000 / 256) times slower than fast mode.
 */
static const struct scrubrate scrubrates_i3100[] = {
	{0, 0x00},		/* Scrubbing Off */
	{500000, 0x0a},		/* Normal mode - 32k clocks */
	{62500000, 0x06},	/* Fast mode - 256 clocks */
	{SDRATE_EOT, 0x00}	/* End of Table */
};
  255. static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
  256. unsigned long page)
  257. {
  258. u32 remap;
  259. struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
  260. debugf3("%s()\n", __func__);
  261. if (page < pvt->tolm)
  262. return page;
  263. if ((page >= 0x100000) && (page < pvt->remapbase))
  264. return page;
  265. remap = (page - pvt->tolm) + pvt->remapbase;
  266. if (remap < pvt->remaplimit)
  267. return remap;
  268. e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
  269. return pvt->tolm - 1;
  270. }
/*
 * Decode and report one correctable DRAM error.
 * @error_one:     DRAM FERR/NERR status bits; bit 0 selects the channel
 * @sec1_add:      failing address register value (bits 34:6 of the DRAM
 *                 linear address)
 * @sec1_syndrome: ECC syndrome for the error
 */
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	/* convert the addr to 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 */
	if (pvt->mc_symmetric) {
		/* chip select are bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping: find the table entry whose
		 * value equals the raw row; its index is the real csrow */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		if (i < 8)
			row = i;
		else
			/* keep the raw row if no remap entry matched */
			e752x_mc_printk(mci, KERN_WARNING,
					"row %d not found in remap table\n",
					row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
			sec1_syndrome, row, channel, "e752x CE");
}
  311. static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
  312. u32 sec1_add, u16 sec1_syndrome, int *error_found,
  313. int handle_error)
  314. {
  315. *error_found = 1;
  316. if (handle_error)
  317. do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
  318. }
  319. static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
  320. u32 ded_add, u32 scrb_add)
  321. {
  322. u32 error_2b, block_page;
  323. int row;
  324. struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
  325. debugf3("%s()\n", __func__);
  326. if (error_one & 0x0202) {
  327. error_2b = ded_add;
  328. /* convert to 4k address */
  329. block_page = error_2b >> (PAGE_SHIFT - 4);
  330. row = pvt->mc_symmetric ?
  331. /* chip select are bits 14 & 13 */
  332. ((block_page >> 1) & 3) :
  333. edac_mc_find_csrow_by_page(mci, block_page);
  334. /* e752x mc reads 34:6 of the DRAM linear address */
  335. edac_mc_handle_ue(mci, block_page,
  336. offset_in_page(error_2b << 4),
  337. row, "e752x UE from Read");
  338. }
  339. if (error_one & 0x0404) {
  340. error_2b = scrb_add;
  341. /* convert to 4k address */
  342. block_page = error_2b >> (PAGE_SHIFT - 4);
  343. row = pvt->mc_symmetric ?
  344. /* chip select are bits 14 & 13 */
  345. ((block_page >> 1) & 3) :
  346. edac_mc_find_csrow_by_page(mci, block_page);
  347. /* e752x mc reads 34:6 of the DRAM linear address */
  348. edac_mc_handle_ue(mci, block_page,
  349. offset_in_page(error_2b << 4),
  350. row, "e752x UE from Scruber");
  351. }
  352. }
  353. static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
  354. u32 ded_add, u32 scrb_add, int *error_found,
  355. int handle_error)
  356. {
  357. *error_found = 1;
  358. if (handle_error)
  359. do_process_ue(mci, error_one, ded_add, scrb_add);
  360. }
  361. static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
  362. int *error_found, int handle_error)
  363. {
  364. *error_found = 1;
  365. if (!handle_error)
  366. return;
  367. debugf3("%s()\n", __func__);
  368. edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
  369. }
/*
 * Log a DRAM read-retry event (a read that succeeded after retry).
 * @error:     DRAM status bits (unused here)
 * @retry_add: failing address from E752X_DRAM_RETR_ADD (bits 34:6 of the
 *             DRAM linear address)
 * NOTE(review): the message says "CE page" - presumably a retried read is
 * treated as correctable; confirm against the chipset documentation.
 */
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
				u32 retry_add)
{
	u32 error_1b, page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	error_1b = retry_add;
	page = error_1b >> (PAGE_SHIFT - 4);	/* convert the addr to 4k page */

	/* chip select are bits 14 & 13 */
	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, page);

	e752x_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, row %d : Memory read retry\n",
			(long unsigned int)page, row);
}
  385. static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
  386. u32 retry_add, int *error_found,
  387. int handle_error)
  388. {
  389. *error_found = 1;
  390. if (handle_error)
  391. do_process_ded_retry(mci, error, retry_add);
  392. }
  393. static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
  394. int *error_found, int handle_error)
  395. {
  396. *error_found = 1;
  397. if (handle_error)
  398. e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
  399. }
/* Names for the global FERR/NERR error sources, one per bit (bit 0 first) */
static char *global_message[11] = {
	"PCI Express C1",
	"PCI Express C",
	"PCI Express B1",
	"PCI Express B",
	"PCI Express A1",
	"PCI Express A",
	"DMA Controller",
	"HUB or NS Interface",
	"System Bus",
	"DRAM Controller",	/* 9th entry */
	"Internal Buffer"
};

/* Index of the DRAM controller entry in global_message[] */
#define DRAM_ENTRY	9

/* Prefix chosen by the 'fatal' flag in the do_*_error() helpers */
static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
  415. static void do_global_error(int fatal, u32 errors)
  416. {
  417. int i;
  418. for (i = 0; i < 11; i++) {
  419. if (errors & (1 << i)) {
  420. /* If the error is from DRAM Controller OR
  421. * we are to report ALL errors, then
  422. * report the error
  423. */
  424. if ((i == DRAM_ENTRY) || report_non_memory_errors)
  425. e752x_printk(KERN_WARNING, "%sError %s\n",
  426. fatal_message[fatal],
  427. global_message[i]);
  428. }
  429. }
  430. }
  431. static inline void global_error(int fatal, u32 errors, int *error_found,
  432. int handle_error)
  433. {
  434. *error_found = 1;
  435. if (handle_error)
  436. do_global_error(fatal, errors);
  437. }
/* Names for the hub-interface FERR/NERR error bits (bit 0 first) */
static char *hub_message[7] = {
	"HI Address or Command Parity", "HI Illegal Access",
	"HI Internal Parity", "Out of Range Access",
	"HI Data Parity", "Enhanced Config Access",
	"Hub Interface Target Abort"
};
  444. static void do_hub_error(int fatal, u8 errors)
  445. {
  446. int i;
  447. for (i = 0; i < 7; i++) {
  448. if (errors & (1 << i))
  449. e752x_printk(KERN_WARNING, "%sError %s\n",
  450. fatal_message[fatal], hub_message[i]);
  451. }
  452. }
  453. static inline void hub_error(int fatal, u8 errors, int *error_found,
  454. int handle_error)
  455. {
  456. *error_found = 1;
  457. if (handle_error)
  458. do_hub_error(fatal, errors);
  459. }
/* i3100 NSI error classification masks; bits outside NSI_ERR_MASK are
 * reserved and ignored */
#define NSI_FATAL_MASK		0x0c080081
#define NSI_NON_FATAL_MASK	0x23a0ba64
#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)

/* Names for the NSI FERR/NERR error bits (bit 0 first; "" = reserved) */
static char *nsi_message[30] = {
	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
	"",			/* reserved */
	"NSI Parity Error",	/* bit 2, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"Correctable Error Message",	/* bit 5, non-fatal */
	"Non-Fatal Error Message",	/* bit 6, non-fatal */
	"Fatal Error Message",	/* bit 7, fatal */
	"",			/* reserved */
	"Receiver Error",	/* bit 9, non-fatal */
	"",			/* reserved */
	"Bad TLP",		/* bit 11, non-fatal */
	"Bad DLLP",		/* bit 12, non-fatal */
	"REPLAY_NUM Rollover",	/* bit 13, non-fatal */
	"",			/* reserved */
	"Replay Timer Timeout",	/* bit 15, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"",			/* reserved */
	"Data Link Protocol Error",	/* bit 19, fatal */
	"",			/* reserved */
	"Poisoned TLP",		/* bit 21, non-fatal */
	"",			/* reserved */
	"Completion Timeout",	/* bit 23, non-fatal */
	"Completer Abort",	/* bit 24, non-fatal */
	"Unexpected Completion",	/* bit 25, non-fatal */
	"Receiver Overflow",	/* bit 26, fatal */
	"Malformed TLP",	/* bit 27, fatal */
	"",			/* reserved */
	"Unsupported Request"	/* bit 29, non-fatal */
};
  495. static void do_nsi_error(int fatal, u32 errors)
  496. {
  497. int i;
  498. for (i = 0; i < 30; i++) {
  499. if (errors & (1 << i))
  500. printk(KERN_WARNING "%sError %s\n",
  501. fatal_message[fatal], nsi_message[i]);
  502. }
  503. }
  504. static inline void nsi_error(int fatal, u32 errors, int *error_found,
  505. int handle_error)
  506. {
  507. *error_found = 1;
  508. if (handle_error)
  509. do_nsi_error(fatal, errors);
  510. }
/* Names for the memory-buffer FERR/NERR error bits (bit 0 first) */
static char *membuf_message[4] = {
	"Internal PMWB to DRAM parity",
	"Internal PMWB to System Bus Parity",
	"Internal System Bus or IO to PMWB Parity",
	"Internal DRAM to PMWB Parity"
};
  517. static void do_membuf_error(u8 errors)
  518. {
  519. int i;
  520. for (i = 0; i < 4; i++) {
  521. if (errors & (1 << i))
  522. e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
  523. membuf_message[i]);
  524. }
  525. }
  526. static inline void membuf_error(u8 errors, int *error_found, int handle_error)
  527. {
  528. *error_found = 1;
  529. if (handle_error)
  530. do_membuf_error(errors);
  531. }
/* Names for the system-bus FERR/NERR error bits (bit 0 first) */
static char *sysbus_message[10] = {
	"Addr or Request Parity",
	"Data Strobe Glitch",
	"Addr Strobe Glitch",
	"Data Parity",
	"Addr Above TOM",
	"Non DRAM Lock Error",
	"MCERR", "BINIT",
	"Memory Parity",
	"IO Subsystem Parity"
};
  543. static void do_sysbus_error(int fatal, u32 errors)
  544. {
  545. int i;
  546. for (i = 0; i < 10; i++) {
  547. if (errors & (1 << i))
  548. e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
  549. fatal_message[fatal], sysbus_message[i]);
  550. }
  551. }
  552. static inline void sysbus_error(int fatal, u32 errors, int *error_found,
  553. int handle_error)
  554. {
  555. *error_found = 1;
  556. if (handle_error)
  557. do_sysbus_error(fatal, errors);
  558. }
  559. static void e752x_check_hub_interface(struct e752x_error_info *info,
  560. int *error_found, int handle_error)
  561. {
  562. u8 stat8;
  563. //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
  564. stat8 = info->hi_ferr;
  565. if (stat8 & 0x7f) { /* Error, so process */
  566. stat8 &= 0x7f;
  567. if (stat8 & 0x2b)
  568. hub_error(1, stat8 & 0x2b, error_found, handle_error);
  569. if (stat8 & 0x54)
  570. hub_error(0, stat8 & 0x54, error_found, handle_error);
  571. }
  572. //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
  573. stat8 = info->hi_nerr;
  574. if (stat8 & 0x7f) { /* Error, so process */
  575. stat8 &= 0x7f;
  576. if (stat8 & 0x2b)
  577. hub_error(1, stat8 & 0x2b, error_found, handle_error);
  578. if (stat8 & 0x54)
  579. hub_error(0, stat8 & 0x54, error_found, handle_error);
  580. }
  581. }
  582. static void e752x_check_ns_interface(struct e752x_error_info *info,
  583. int *error_found, int handle_error)
  584. {
  585. u32 stat32;
  586. stat32 = info->nsi_ferr;
  587. if (stat32 & NSI_ERR_MASK) { /* Error, so process */
  588. if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
  589. nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
  590. handle_error);
  591. if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
  592. nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
  593. handle_error);
  594. }
  595. stat32 = info->nsi_nerr;
  596. if (stat32 & NSI_ERR_MASK) {
  597. if (stat32 & NSI_FATAL_MASK)
  598. nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
  599. handle_error);
  600. if (stat32 & NSI_NON_FATAL_MASK)
  601. nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
  602. handle_error);
  603. }
  604. }
  605. static void e752x_check_sysbus(struct e752x_error_info *info,
  606. int *error_found, int handle_error)
  607. {
  608. u32 stat32, error32;
  609. //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
  610. stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
  611. if (stat32 == 0)
  612. return; /* no errors */
  613. error32 = (stat32 >> 16) & 0x3ff;
  614. stat32 = stat32 & 0x3ff;
  615. if (stat32 & 0x087)
  616. sysbus_error(1, stat32 & 0x087, error_found, handle_error);
  617. if (stat32 & 0x378)
  618. sysbus_error(0, stat32 & 0x378, error_found, handle_error);
  619. if (error32 & 0x087)
  620. sysbus_error(1, error32 & 0x087, error_found, handle_error);
  621. if (error32 & 0x378)
  622. sysbus_error(0, error32 & 0x378, error_found, handle_error);
  623. }
  624. static void e752x_check_membuf(struct e752x_error_info *info,
  625. int *error_found, int handle_error)
  626. {
  627. u8 stat8;
  628. stat8 = info->buf_ferr;
  629. if (stat8 & 0x0f) { /* Error, so process */
  630. stat8 &= 0x0f;
  631. membuf_error(stat8, error_found, handle_error);
  632. }
  633. stat8 = info->buf_nerr;
  634. if (stat8 & 0x0f) { /* Error, so process */
  635. stat8 &= 0x0f;
  636. membuf_error(stat8, error_found, handle_error);
  637. }
  638. }
/*
 * Decode the DRAM first-error and next-error status words and dispatch
 * each error class to its handler. Per the handlers below, the mask pairs
 * are: 0x0101 CE, 0x4040 UE on memory-write log (no address available),
 * 0x2020 read retry, 0x0808 CE threshold, 0x0606 UE (read/scrub).
 * NOTE(review): the low/high bytes of each mask presumably correspond to
 * the two DRAM channels - not confirmed from this file.
 */
static void e752x_check_dram(struct mem_ctl_info *mci,
			struct e752x_error_info *info, int *error_found,
			int handle_error)
{
	u16 error_one, error_next;

	error_one = info->dram_ferr;
	error_next = info->dram_nerr;

	/* decode and report errors */
	if (error_one & 0x0101)	/* check first error correctable */
		process_ce(mci, error_one, info->dram_sec1_add,
			info->dram_sec1_syndrome, error_found, handle_error);

	if (error_next & 0x0101)	/* check next error correctable */
		process_ce(mci, error_next, info->dram_sec2_add,
			info->dram_sec2_syndrome, error_found, handle_error);

	if (error_one & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_next & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_one & 0x2020)
		process_ded_retry(mci, error_one, info->dram_retr_add,
				error_found, handle_error);

	if (error_next & 0x2020)
		process_ded_retry(mci, error_next, info->dram_retr_add,
				error_found, handle_error);

	if (error_one & 0x0808)
		process_threshold_ce(mci, error_one, error_found, handle_error);

	if (error_next & 0x0808)
		process_threshold_ce(mci, error_next, error_found,
				handle_error);

	if (error_one & 0x0606)
		process_ue(mci, error_one, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);

	if (error_next & 0x0606)
		process_ue(mci, error_next, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);
}
/*
 * Snapshot all error registers from device 0 function 1 into @info, then
 * write back what was read to acknowledge/clear the latched errors.
 * First-error (FERR) registers are handled, then next-error (NERR) ones;
 * the detail registers are only read when the corresponding global
 * register is non-zero. On the 3100 the NSI registers replace the
 * hub-interface ones. The order (read details, clear details, clear the
 * global register last) is deliberate - do not reorder.
 */
static void e752x_get_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *)mci->pvt_info;
	dev = pvt->dev_d0f1;

	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	if (info->ferr_global) {
		/* 3100 has NSI registers instead of a hub interface */
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_FERR,
					&info->nsi_ferr);
			info->hi_ferr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_FERR,
					&info->hi_ferr);
			info->nsi_ferr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				&info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				&info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				&info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				&info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				&info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				&info->dram_retr_add);

		/* ignore the reserved bits just in case */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					info->hi_ferr);

		if (info->nsi_ferr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_FERR,
					info->nsi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					info->buf_ferr);

		/* DRAM FERR is cleared through the bridge device via a
		 * read-modify-write of just the latched bits */
		if (info->dram_ferr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
					info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	if (info->nerr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_NERR,
					&info->nsi_nerr);
			info->hi_nerr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_NERR,
					&info->hi_nerr);
			info->nsi_nerr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				&info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				&info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				&info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					info->hi_nerr);

		if (info->nsi_nerr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_NERR,
					info->nsi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
					info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				info->nerr_global);
	}
}
  765. static int e752x_process_error_info(struct mem_ctl_info *mci,
  766. struct e752x_error_info *info,
  767. int handle_errors)
  768. {
  769. u32 error32, stat32;
  770. int error_found;
  771. error_found = 0;
  772. error32 = (info->ferr_global >> 18) & 0x3ff;
  773. stat32 = (info->ferr_global >> 4) & 0x7ff;
  774. if (error32)
  775. global_error(1, error32, &error_found, handle_errors);
  776. if (stat32)
  777. global_error(0, stat32, &error_found, handle_errors);
  778. error32 = (info->nerr_global >> 18) & 0x3ff;
  779. stat32 = (info->nerr_global >> 4) & 0x7ff;
  780. if (error32)
  781. global_error(1, error32, &error_found, handle_errors);
  782. if (stat32)
  783. global_error(0, stat32, &error_found, handle_errors);
  784. e752x_check_hub_interface(info, &error_found, handle_errors);
  785. e752x_check_ns_interface(info, &error_found, handle_errors);
  786. e752x_check_sysbus(info, &error_found, handle_errors);
  787. e752x_check_membuf(info, &error_found, handle_errors);
  788. e752x_check_dram(mci, info, &error_found, handle_errors);
  789. return error_found;
  790. }
/* Periodic poll callback installed as mci->edac_check: capture the current
 * error state from hardware and process it (handle_errors = 1).
 */
static void e752x_check(struct mem_ctl_info *mci)
{
	struct e752x_error_info info;

	debugf3("%s()\n", __func__);
	e752x_get_error_info(mci, &info);
	e752x_process_error_info(mci, &info, 1);
}
  798. /* Program byte/sec bandwidth scrub rate to hardware */
  799. static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
  800. {
  801. const struct scrubrate *scrubrates;
  802. struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
  803. struct pci_dev *pdev = pvt->dev_d0f0;
  804. int i;
  805. if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
  806. scrubrates = scrubrates_i3100;
  807. else
  808. scrubrates = scrubrates_e752x;
  809. /* Translate the desired scrub rate to a e752x/3100 register value.
  810. * Search for the bandwidth that is equal or greater than the
  811. * desired rate and program the cooresponding register value.
  812. */
  813. for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
  814. if (scrubrates[i].bandwidth >= new_bw)
  815. break;
  816. if (scrubrates[i].bandwidth == SDRATE_EOT)
  817. return -1;
  818. pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
  819. return scrubrates[i].bandwidth;
  820. }
  821. /* Convert current scrub rate value into byte/sec bandwidth */
  822. static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
  823. {
  824. const struct scrubrate *scrubrates;
  825. struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
  826. struct pci_dev *pdev = pvt->dev_d0f0;
  827. u16 scrubval;
  828. int i;
  829. if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
  830. scrubrates = scrubrates_i3100;
  831. else
  832. scrubrates = scrubrates_e752x;
  833. /* Find the bandwidth matching the memory scrubber configuration */
  834. pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
  835. scrubval = scrubval & 0x0f;
  836. for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
  837. if (scrubrates[i].scrubval == scrubval)
  838. break;
  839. if (scrubrates[i].bandwidth == SDRATE_EOT) {
  840. e752x_printk(KERN_WARNING,
  841. "Invalid sdram scrub control value: 0x%x\n", scrubval);
  842. return -1;
  843. }
  844. return scrubrates[i].bandwidth;
  845. }
  846. /* Return 1 if dual channel mode is active. Else return 0. */
  847. static inline int dual_channel_active(u16 ddrcsr)
  848. {
  849. return (((ddrcsr >> 12) & 3) == 3);
  850. }
  851. /* Remap csrow index numbers if map_type is "reverse"
  852. */
  853. static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
  854. {
  855. struct e752x_pvt *pvt = mci->pvt_info;
  856. if (!pvt->map_type)
  857. return (7 - index);
  858. return (index);
  859. }
/* Populate mci->csrows from the DRAM Row Attribute (DRA) and DRAM Row
 * Boundary (DRB) registers: page ranges, device width, and the EDAC mode
 * each populated row supports.
 */
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size;

	/* assemble the 32-bit DRA value from four byte-wide registers */
	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	drc_chan = dual_channel_active(ddrcsr);
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation). DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		csrow = &mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
			cumul_size);
		/* a DRB equal to the previous one means the row holds no
		 * additional memory */
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
		csrow->mtype = MEM_RDDR;	/* only one type supported */
		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				csrow->edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				csrow->edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			csrow->edac_mode = EDAC_NONE;
	}
}
  920. static void e752x_init_mem_map_table(struct pci_dev *pdev,
  921. struct e752x_pvt *pvt)
  922. {
  923. int index;
  924. u8 value, last, row;
  925. last = 0;
  926. row = 0;
  927. for (index = 0; index < 8; index += 2) {
  928. pci_read_config_byte(pdev, E752X_DRB + index, &value);
  929. /* test if there is a dimm in this slot */
  930. if (value == last) {
  931. /* no dimm in the slot, so flag it as empty */
  932. pvt->map[index] = 0xff;
  933. pvt->map[index + 1] = 0xff;
  934. } else { /* there is a dimm in the slot */
  935. pvt->map[index] = row;
  936. row++;
  937. last = value;
  938. /* test the next value to see if the dimm is double
  939. * sided
  940. */
  941. pci_read_config_byte(pdev, E752X_DRB + index + 1,
  942. &value);
  943. /* the dimm is single sided, so flag as empty */
  944. /* this is a double sided dimm to save the next row #*/
  945. pvt->map[index + 1] = (value == last) ? 0xff : row;
  946. row++;
  947. last = value;
  948. }
  949. }
  950. }
/* Acquire references to the error-reporting device (D0:F1, stored in
 * pvt->bridge_ck / pvt->dev_d0f1) and the memory-controller device (D0:F0,
 * stored in pvt->dev_d0f0). Return 0 on success or 1 on failure.
 * On success the caller owns three pci_dev references and must drop them
 * with pci_dev_put() on teardown.
 */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
			struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
				pvt->dev_info->err_dev, pvt->bridge_ck);

	/* the BIOS may have hidden D0:F1; try scanning it directly */
	if (pvt->bridge_ck == NULL)
		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
							PCI_DEVFN(0, 1));

	if (pvt->bridge_ck == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		return 1;
	}

	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
			e752x_devs[dev_idx].ctl_dev,
			NULL);

	if (dev == NULL)
		goto fail;

	pvt->dev_d0f0 = dev;
	/* take an extra reference: dev_d0f1 and bridge_ck alias the same
	 * device but are released independently */
	pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);

	return 0;

fail:
	pci_dev_put(pvt->bridge_ck);
	return 1;
}
  979. /* Setup system bus parity mask register.
  980. * Sysbus parity supported on:
  981. * e7320/e7520/e7525 + Xeon
  982. */
  983. static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
  984. {
  985. char *cpu_id = cpu_data(0).x86_model_id;
  986. struct pci_dev *dev = pvt->dev_d0f1;
  987. int enable = 1;
  988. /* Allow module parameter override, else see if CPU supports parity */
  989. if (sysbus_parity != -1) {
  990. enable = sysbus_parity;
  991. } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
  992. e752x_printk(KERN_INFO, "System Bus Parity not "
  993. "supported by CPU, disabling\n");
  994. enable = 0;
  995. }
  996. if (enable)
  997. pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
  998. else
  999. pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
  1000. }
/* Unmask error reporting and disable SMI generation on the D0:F1 device so
 * errors reach this driver instead of being hidden or routed to firmware.
 */
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	dev = pvt->dev_d0f1;
	/* Turn off error disable & SMI in case the BIOS turned it on */
	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
		/* 3100 uses the NSI registers instead of the HI ones */
		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
	} else {
		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
	}

	e752x_init_sysbus_parity_mask(pvt);

	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
/* Main probe: allocate and populate the mem_ctl_info, acquire the related
 * PCI devices, initialize csrows and the row map, clear stale errors, and
 * register with the EDAC core. Returns 0 on success or a negative errno.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	debugf0("%s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	/* un-hide D0:F1 (safe only per the check/override above) */
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);

	if (mci == NULL) {
		return -ENOMEM;
	}

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E752X_REVISION;
	mci->dev = &pdev->dev;

	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type.  1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;

	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n",
		pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* non-fatal: MC reporting still works without it */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	/* drop the references taken in e752x_get_devs() */
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);

	return -ENODEV;
}
  1126. /* returns count (>= 0), or negative on error */
  1127. static int __devinit e752x_init_one(struct pci_dev *pdev,
  1128. const struct pci_device_id *ent)
  1129. {
  1130. debugf0("%s()\n", __func__);
  1131. /* wake up and enable device */
  1132. if (pci_enable_device(pdev) < 0)
  1133. return -EIO;
  1134. return e752x_probe1(pdev, ent->driver_data);
  1135. }
  1136. static void __devexit e752x_remove_one(struct pci_dev *pdev)
  1137. {
  1138. struct mem_ctl_info *mci;
  1139. struct e752x_pvt *pvt;
  1140. debugf0("%s()\n", __func__);
  1141. if (e752x_pci)
  1142. edac_pci_release_generic_ctl(e752x_pci);
  1143. if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
  1144. return;
  1145. pvt = (struct e752x_pvt *)mci->pvt_info;
  1146. pci_dev_put(pvt->dev_d0f0);
  1147. pci_dev_put(pvt->dev_d0f1);
  1148. pci_dev_put(pvt->bridge_ck);
  1149. edac_mc_free(mci);
  1150. }
/* PCI IDs this driver binds to; driver_data carries the e752x_devs index. */
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
	{
	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{
	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{
	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{
	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3100},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
/* PCI driver glue: registered in e752x_init(), removed in e752x_exit(). */
static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = __devexit_p(e752x_remove_one),
	.id_table = e752x_pci_tbl,
};
  1175. static int __init e752x_init(void)
  1176. {
  1177. int pci_rc;
  1178. debugf3("%s()\n", __func__);
  1179. /* Ensure that the OPSTATE is set correctly for POLL or NMI */
  1180. opstate_init();
  1181. pci_rc = pci_register_driver(&e752x_driver);
  1182. return (pci_rc < 0) ? pci_rc : 0;
  1183. }
/* Module exit: unregister the PCI driver (removal callbacks do cleanup). */
static void __exit e752x_exit(void)
{
	debugf3("%s()\n", __func__);
	pci_unregister_driver(&e752x_driver);
}
module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

/* Module parameters, exposed read-only (0444) or read-write (0644) via
 * sysfs. See the PARM_DESC strings for the accepted values. */
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		 " 1=force unhide and hope BIOS doesn't fight driver for "
		 "Dev0:Fun1 access");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		" 1=enable system bus parity checking, default=auto-detect");

module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
		"reporting, 1=enable non-memory error reporting");