/* edac_mc.c */
/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 * http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"
/* lock protecting the mc_devices list (memory controller control array) */
static DEFINE_MUTEX(mem_ctls_mutex);

/* global list of registered controllers, kept sorted by ascending mc_idx
 * (see add_mc_to_global_list())
 */
static LIST_HEAD(mc_devices);
#ifdef CONFIG_EDAC_DEBUG

/* Dump all fields of one channel_info at debug level 4 */
static void edac_mc_dump_channel(struct channel_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
	debugf4("\tchannel->label = '%s'\n", chan->label);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}

/* Dump all fields of one csrow_info at debug level 4 */
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}

/* Dump the top-level fields of a mem_ctl_info (levels 3 and 4) */
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tdev = %p\n", mci->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */
/*
 * Human-readable names for memory types, indexed by enum mem_type.
 * Keep those in sync with the enum mem_type.
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
  96. /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  97. * Adjust 'ptr' so that its alignment is at least as stringent as what the
  98. * compiler would provide for X and return the aligned result.
  99. *
  100. * If 'size' is a constant, the compiler will optimize this whole function
  101. * down to either a no-op or the addition of a constant to the value of 'ptr'.
  102. */
  103. void *edac_align_ptr(void *ptr, unsigned size)
  104. {
  105. unsigned align, r;
  106. /* Here we assume that the alignment of a "long long" is the most
  107. * stringent alignment that the compiler will ever provide by default.
  108. * As far as I know, this is a reasonable assumption.
  109. */
  110. if (size > sizeof(long))
  111. align = sizeof(long long);
  112. else if (size > sizeof(int))
  113. align = sizeof(long);
  114. else if (size > sizeof(short))
  115. align = sizeof(int);
  116. else if (size > sizeof(char))
  117. align = sizeof(short);
  118. else
  119. return (char *)ptr;
  120. r = size % align;
  121. if (r == 0)
  122. return (char *)ptr;
  123. return (void *)(((unsigned long)ptr) + align - r);
  124. }
/**
 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
 * @sz_pvt: size of private storage needed
 * @nr_csrows: Number of CSROWS needed for this MC
 * @nr_chans: Number of channels for the MC
 * @edac_index: index assigned to the new controller (mci->mc_idx)
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * Only can be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * Returns:
 *	NULL	allocation failed
 *	struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
				unsigned nr_chans, int edac_index)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csi, *csrow;
	struct channel_info *chi, *chp, *chan;
	void *pvt;
	unsigned size;
	int row, chn;
	int err;

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 *
	 * Trick: lay everything out relative to a NULL base, so each
	 * edac_align_ptr() result is really a byte offset, and the final
	 * 'pvt' offset plus sz_pvt is the total allocation size.
	 */
	mci = (struct mem_ctl_info *)0;
	csi = edac_align_ptr(&mci[1], sizeof(*csi));
	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
	size = ((unsigned long)pvt) + sz_pvt;

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
	chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = edac_index;
	mci->csrows = csi;
	mci->pvt_info = pvt;
	mci->nr_csrows = nr_csrows;

	/* Wire up the csrow <-> channel back-pointers; channels for row N
	 * live at chi[N * nr_chans .. N * nr_chans + nr_chans - 1].
	 */
	for (row = 0; row < nr_csrows; row++) {
		csrow = &csi[row];
		csrow->csrow_idx = row;
		csrow->mci = mci;
		csrow->nr_channels = nr_chans;
		chp = &chi[row * nr_chans];
		csrow->channels = chp;

		for (chn = 0; chn < nr_chans; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csrow;
		}
	}

	mci->op_state = OP_ALLOC;
	INIT_LIST_HEAD(&mci->grp_kobj_list);

	/*
	 * Initialize the 'root' kobj for the edac_mc controller
	 */
	err = edac_mc_register_sysfs_main_kobj(mci);
	if (err) {
		kfree(mci);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */
	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 *
 * NOTE(review): edac_mc_alloc() states the actual free occurs during the
 * kobject release callback triggered by
 * edac_mc_unregister_sysfs_main_kobj(); confirm the explicit kfree() below
 * does not double-free when that callback also releases the mci.
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	debugf1("%s()\n", __func__);

	/* drop the sysfs root kobject for this controller */
	edac_mc_unregister_sysfs_main_kobj(mci);

	/* free the mci instance memory here */
	kfree(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
  220. /**
  221. * find_mci_by_dev
  222. *
  223. * scan list of controllers looking for the one that manages
  224. * the 'dev' device
  225. * @dev: pointer to a struct device related with the MCI
  226. */
  227. struct mem_ctl_info *find_mci_by_dev(struct device *dev)
  228. {
  229. struct mem_ctl_info *mci;
  230. struct list_head *item;
  231. debugf3("%s()\n", __func__);
  232. list_for_each(item, &mc_devices) {
  233. mci = list_entry(item, struct mem_ctl_info, link);
  234. if (mci->dev == dev)
  235. return mci;
  236. }
  237. return NULL;
  238. }
  239. EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 *
 * Returns non-zero when a check should be performed: always in poll
 * mode, otherwise only if edac_err_assert was set (which is consumed
 * and cleared here).
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	/* in poll mode there is no asserted-error flag to consume */
	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	/* read-and-clear the assertion flag */
	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 *
 * Runs the controller's edac_check() under mem_ctls_mutex, then re-arms
 * itself with the current poll period.  Note the re-queue happens AFTER
 * the mutex is dropped, so an OFFLINE transition is only observed on the
 * next invocation.
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 * locking model:
 *
 *	called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	/* (re)initialize the delayed work and arm it with the new period */
	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 * locking model:
 *
 *	called WITHOUT lock held
 *
 * NOTE(review): edac_mc_workq_function() re-arms itself after dropping
 * mem_ctls_mutex, so a cancel+flush here can still race with a re-queue
 * unless the caller has arranged for the OP_OFFLINE check to fire on the
 * next run — confirm the ordering in edac_mc_del_mc().
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	/* nothing to tear down unless this instance is being polled */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		debugf0("%s() not canceled, flush the queue\n",
			__func__);

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}
  313. /*
  314. * edac_mc_reset_delay_period(unsigned long value)
  315. *
  316. * user space has updated our poll period value, need to
  317. * reset our workq delays
  318. */
  319. void edac_mc_reset_delay_period(int value)
  320. {
  321. struct mem_ctl_info *mci;
  322. struct list_head *item;
  323. mutex_lock(&mem_ctls_mutex);
  324. /* scan the list and turn off all workq timers, doing so under lock
  325. */
  326. list_for_each(item, &mc_devices) {
  327. mci = list_entry(item, struct mem_ctl_info, link);
  328. if (mci->op_state == OP_RUNNING_POLL)
  329. cancel_delayed_work(&mci->work);
  330. }
  331. mutex_unlock(&mem_ctls_mutex);
  332. /* re-walk the list, and reset the poll delay */
  333. mutex_lock(&mem_ctls_mutex);
  334. list_for_each(item, &mc_devices) {
  335. mci = list_entry(item, struct mem_ctl_info, link);
  336. edac_mc_workq_setup(mci, (unsigned long) value);
  337. }
  338. mutex_unlock(&mem_ctls_mutex);
  339. }
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 * locking model:
 *
 *	called with the mem_ctls_mutex lock held
 *
 * Inserts mci into mc_devices keeping the list sorted by ascending
 * mc_idx.  Fails if the same device or the same mc_idx is already
 * registered.  RCU list primitives are used so lock-free readers (e.g.
 * NMI-context traversals) stay safe.
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	/* default: append at the tail if no larger index is found */
	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->dev);
	if (unlikely(p != NULL))
		goto fail0;

	/* find the first entry with an index >= ours; that is either a
	 * duplicate (error) or our insertion point
	 */
	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->dev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		" duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
/* Remove mci from the global list and wait for any lock-free readers
 * to finish before the caller may reuse/free the node.
 */
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);
}
  389. /**
  390. * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
  391. *
  392. * If found, return a pointer to the structure.
  393. * Else return NULL.
  394. *
  395. * Caller must hold mem_ctls_mutex.
  396. */
  397. struct mem_ctl_info *edac_mc_find(int idx)
  398. {
  399. struct list_head *item;
  400. struct mem_ctl_info *mci;
  401. list_for_each(item, &mc_devices) {
  402. mci = list_entry(item, struct mem_ctl_info, link);
  403. if (mci->mc_idx >= idx) {
  404. if (mci->mc_idx == idx)
  405. return mci;
  406. break;
  407. }
  408. }
  409. return NULL;
  410. }
  411. EXPORT_SYMBOL(edac_mc_find);
/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Caller must have already assigned a unique mci->mc_idx.
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	/* dump the controller layout at sufficiently high debug levels */
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(&mci->csrows[i].
						channels[j]);
		}
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	/* undo the global-list insertion done above */
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 *
 * NOTE(review): OP_OFFLINE is set only after edac_mc_workq_teardown();
 * since the work item re-arms itself, there may be a window in which it
 * re-queues after the teardown's cancel/flush.  Reordering is not a local
 * fix because teardown itself bails out unless op_state == OP_RUNNING_POLL
 * — confirm against the upstream history before changing.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
/* Scrub (read-modify-write) 'size' bytes at the given physical page and
 * offset so the memory controller re-writes corrected data back to DRAM.
 * @page:   page frame number of the faulting page
 * @offset: byte offset of the error within the page
 * @size:   number of bytes to scrub (the csrow grain)
 */
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("%s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	/* highmem pages are mapped via a fixmap slot; IRQs are disabled
	 * around the atomic mapping — presumably to keep an interrupt
	 * from reusing the same slot (TODO confirm)
	 */
	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
  524. /* FIXME - should return -1 */
  525. int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
  526. {
  527. struct csrow_info *csrows = mci->csrows;
  528. int row, i;
  529. debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
  530. row = -1;
  531. for (i = 0; i < mci->nr_csrows; i++) {
  532. struct csrow_info *csrow = &csrows[i];
  533. if (csrow->nr_pages == 0)
  534. continue;
  535. debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
  536. "mask(0x%lx)\n", mci->mc_idx, __func__,
  537. csrow->first_page, page, csrow->last_page,
  538. csrow->page_mask);
  539. if ((page >= csrow->first_page) &&
  540. (page <= csrow->last_page) &&
  541. ((page & csrow->page_mask) ==
  542. (csrow->first_page & csrow->page_mask))) {
  543. row = i;
  544. break;
  545. }
  546. }
  547. if (row == -1)
  548. edac_mc_printk(mci, KERN_ERR,
  549. "could not look up page error address %lx\n",
  550. (unsigned long)page);
  551. return row;
  552. }
  553. EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
/* FIXME - setable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
/* Handle a correctable error on a specific (row, channel): validate the
 * coordinates, log the event, bump CE counters, and optionally software-
 * scrub the affected memory.
 */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, unsigned long syndrome,
		int row, int channel, const char *msg)
{
	unsigned long remapped_page;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		/* still account the event, just without location info */
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range "
			"(%d >= %d)\n", channel,
			mci->csrows[row].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, syndrome, row, channel,
			mci->csrows[row].channels[channel].label, msg);

	/* bump CE counters at all three levels of granularity */
	mci->ce_count++;
	mci->csrows[row].ce_count++;
	mci->csrows[row].channels[channel].ce_count++;

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some MC's can remap memory so that it is still available
		 * at a different address when PCI devices map into memory.
		 * MC's that can't do this lose the memory where PCI devices
		 * are mapped.  This mapping is MC dependent and so we call
		 * back into the MC driver for it to map the MC page to
		 * a physical (CPU) page which can then be mapped to a virtual
		 * page - which can then be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page, offset_in_page,
				mci->csrows[row].grain);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
  610. void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
  611. {
  612. if (edac_mc_get_log_ce())
  613. edac_mc_printk(mci, KERN_WARNING,
  614. "CE - no information available: %s\n", msg);
  615. mci->ce_noinfo_count++;
  616. mci->ce_count++;
  617. }
  618. EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
/* Handle an uncorrectable error on a csrow: build a ':'-joined string of
 * all channel labels on the row, log and/or panic per module policy, and
 * bump the UE counters.
 */
void edac_mc_handle_ue(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, int row, const char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chan;
	int chars;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	/* concatenate the labels of every channel on this row.
	 * NOTE(review): snprintf() returns the would-be length, so 'len'
	 * can go negative on truncation; the 'len > 0' loop guard keeps
	 * the writes in bounds but the label string may be cut short.
	 */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[row].channels[0].label);
	len -= chars;
	pos += chars;

	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
	     chan++) {
		chars = snprintf(pos, len + 1, ":%s",
				mci->csrows[row].channels[chan].label);
		len -= chars;
		pos += chars;
	}

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
			"labels \"%s\": %s\n", page_frame_number,
			offset_in_page, mci->csrows[row].grain, row,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
			"row %d, labels \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, row, labels, msg);

	mci->ue_count++;
	mci->csrows[row].ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
  664. void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
  665. {
  666. if (edac_mc_get_panic_on_ue())
  667. panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
  668. if (edac_mc_get_log_ue())
  669. edac_mc_printk(mci, KERN_WARNING,
  670. "UE - no information available: %s\n", msg);
  671. mci->ue_noinfo_count++;
  672. mci->ue_count++;
  673. }
  674. EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
/*************************************************************
 * On Fully Buffered DIMM modules, this help function is
 * called to process UE events
 *
 * Validates the csrow and both channel indices, bumps the UE counters,
 * builds an "a-b" label pair from the two channels, then logs and/or
 * panics per module policy.
 */
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
			unsigned int csrow,
			unsigned int channela,
			unsigned int channelb, char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chars;

	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channela >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-a out of range "
			"(%d >= %d)\n",
			channela, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channelb >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-b out of range "
			"(%d >= %d)\n",
			channelb, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;

	/* Generate the DIMM labels from the specified channels */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[csrow].channels[channela].label);
	len -= chars;
	pos += chars;

	chars = snprintf(pos, len + 1, "-%s",
			mci->csrows[csrow].channels[channelb].label);

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela, channelb,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela,
			channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
/*************************************************************
 * On Fully Buffered DIMM modules, this help function is
 * called to process CE events
 *
 * Validates the csrow/channel pair, optionally logs the event, and
 * bumps the CE counters at all three levels of granularity.
 */
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
			unsigned int csrow, unsigned int channel, char *msg)
{
	/* Ensure boundary values */
	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		/* still account the event, just without location info */
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
			channel, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE row %d, channel %d, label \"%s\": %s\n",
			csrow, channel,
			mci->csrows[csrow].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[channel].ce_count++;
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ce);