debugobjects.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
/* Hash table geometry: 2^14 buckets, indexed by the object's chunk (page). */
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)

/* Free-pool fill target and the low-water mark that triggers a refill. */
#define ODEBUG_POOL_SIZE 512
#define ODEBUG_POOL_MIN_LEVEL 256

/* Address-to-bucket granularity: one chunk per page. */
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: chain of tracked objects, protected by its own lock. */
struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

/* Static bootstrap pool, used until the slab cache is available. */
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* pool_lock protects obj_pool and the obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

/* Statistics exported via debugfs. */
static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set while the selftest runs, to suppress its expected warnings. */
static struct debug_obj_descr *descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
  45. static int __init enable_object_debug(char *str)
  46. {
  47. debug_objects_enabled = 1;
  48. return 0;
  49. }
  50. static int __init disable_object_debug(char *str)
  51. {
  52. debug_objects_enabled = 0;
  53. return 0;
  54. }
early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

/* Human-readable names for enum debug_obj_state, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
/*
 * Refill the free object pool from the slab cache until it reaches
 * ODEBUG_POOL_MIN_LEVEL. Safe from any context: allocations are
 * GFP_ATOMIC and failure is tolerated (the pool just stays low).
 * Returns the current number of free pool objects.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Nothing to allocate from before the slab cache is set up. */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		/* Allocate outside pool_lock; only the list insert is locked. */
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}
/*
 * Lookup an object in the hash bucket. Must be called with the bucket
 * lock held. Also records the longest chain ever walked, for the
 * debugfs statistics.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a tracker object from the pool and link it into bucket @b
 * for @addr. Returns NULL when the pool is empty (caller switches the
 * debugger off). Must be called with interrupts disabled; takes
 * pool_lock internally.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Track usage high-water and free low-water marks. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * Workqueue function: shrink the pool back down to ODEBUG_POOL_SIZE by
 * freeing surplus objects to the slab cache. Scheduled by free_object()
 * when the pool has grown beyond its target size.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		/* Pool is non-empty here: obj_pool_free > 0 by loop condition. */
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free surplus
 * objects if necessary. Safe from any context; the actual slab free
 * happens later in free_obj_work().
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		/* Only schedule once keventd runs and no work is pending. */
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated. Drain every hash bucket back into the pool; the caller
 * has already disabled the tracker.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock, free it outside. */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  197. /*
  198. * We use the pfn of the address for the hash. That way we can check
  199. * for freed objects simply by checking the affected bucket.
  200. */
  201. static struct debug_bucket *get_bucket(unsigned long addr)
  202. {
  203. unsigned long hash;
  204. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  205. return &obj_hash[hash];
  206. }
/*
 * Emit a rate-limited warning about an object in an unexpected state.
 * Warnings from the selftest descriptor (descr_test) are suppressed;
 * at most 5 WARNs are printed over the system's lifetime, but the
 * warnings counter is always bumped.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* debug_hint is optional; fall back to NULL. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  222. /*
  223. * Try to repair the damage, so we have a better chance to get useful
  224. * debug output.
  225. */
  226. static int
  227. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  228. void * addr, enum debug_obj_state state)
  229. {
  230. int fixed = 0;
  231. if (fixup)
  232. fixed = fixup(addr, state);
  233. debug_objects_fixups += fixed;
  234. return fixed;
  235. }
/*
 * Cross-check the caller's on-stack annotation (@onstack) against
 * where @addr actually lives, and warn (at most 5 times) on mismatch.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Core of debug_object_init(): move the tracking state of @addr to
 * INIT, allocating a tracker object on first sight. @onstack is the
 * caller's claim about whether the object lives on the stack.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable the tracker entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* First sighting: verify the on-stack annotation. */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* init of an active object: warn, drop the lock, fix up. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  295. /**
  296. * debug_object_init - debug checks when an object is initialized
  297. * @addr: address of the object
  298. * @descr: pointer to an object specific debug description structure
  299. */
  300. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  301. {
  302. if (!debug_objects_enabled)
  303. return;
  304. __debug_object_init(addr, descr, 0);
  305. }
  306. /**
  307. * debug_object_init_on_stack - debug checks when an object on stack is
  308. * initialized
  309. * @addr: address of the object
  310. * @descr: pointer to an object specific debug description structure
  311. */
  312. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  313. {
  314. if (!debug_objects_enabled)
  315. return;
  316. __debug_object_init(addr, descr, 1);
  317. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Moves a tracked object to ACTIVE. An unknown object may be a
 * statically initialized one; the type-specific fixup_activate callback
 * decides whether that is legitimate.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* Stand-in object used for warnings about untracked addresses. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn, drop the lock, fix up. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Moves a tracked object to INACTIVE, unless it still carries a
 * non-zero extended active state (astate), which indicates a pending
 * consumer and is reported as an error.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Unknown address: warn via a stand-in object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Moves a tracked object to DESTROYED. Destroying an active object is
 * an error and is handed to the type-specific fixup_destroy callback.
 * Unknown addresses are silently ignored.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroy of an active object: warn, drop the lock, fix up. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Removes the tracker object for @addr and returns it to the pool.
 * Freeing an active object is an error handled by fixup_free. Unknown
 * addresses are silently ignored.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Unhash under the lock, return to the pool outside it. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Verifies that @addr is tracked. An unknown address may be a static
 * object; the type-specific fixup_assert_init callback decides, and a
 * warning is emitted when it reports a fixup.
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 *
 * Advances the extended active state (astate) of an ACTIVE object from
 * @expect to @next; any mismatch, non-ACTIVE state or unknown address
 * is reported as an error.
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  550. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan a memory area that is being freed for tracked objects. Active
 * objects are reported and handed to fixup_free; all others are
 * silently untracked. The area is walked chunk by chunk so only the
 * hash buckets covering it are visited.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * The fixup runs unlocked and may mutate the
				 * chain, so restart the bucket scan after it.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  601. void debug_check_no_obj_freed(const void *address, unsigned long size)
  602. {
  603. if (debug_objects_enabled)
  604. __debug_check_no_obj_freed(address, size);
  605. }
  606. #endif
  607. #ifdef CONFIG_DEBUG_FS
/* seq_file show callback: dump the tracker's counters. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for /sys/kernel/debug/debug_objects/stats. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create the debugfs directory and stats file. Registered as an
 * initcall; does nothing when the tracker is disabled.
 */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	/* Tear the directory down again when the file creation failed. */
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
  647. #else
  648. static inline void debug_objects_init_debugfs(void) { }
  649. #endif
  650. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* non-zero simulates static init */
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;
  658. /*
  659. * fixup_init is called when:
  660. * - an active object is initialized
  661. */
  662. static int __init fixup_init(void *addr, enum debug_obj_state state)
  663. {
  664. struct self_test *obj = addr;
  665. switch (state) {
  666. case ODEBUG_STATE_ACTIVE:
  667. debug_object_deactivate(obj, &descr_type_test);
  668. debug_object_init(obj, &descr_type_test);
  669. return 1;
  670. default:
  671. return 0;
  672. }
  673. }
  674. /*
  675. * fixup_activate is called when:
  676. * - an active object is activated
  677. * - an unknown object is activated (might be a statically initialized object)
  678. */
  679. static int __init fixup_activate(void *addr, enum debug_obj_state state)
  680. {
  681. struct self_test *obj = addr;
  682. switch (state) {
  683. case ODEBUG_STATE_NOTAVAILABLE:
  684. if (obj->static_init == 1) {
  685. debug_object_init(obj, &descr_type_test);
  686. debug_object_activate(obj, &descr_type_test);
  687. return 0;
  688. }
  689. return 1;
  690. case ODEBUG_STATE_ACTIVE:
  691. debug_object_deactivate(obj, &descr_type_test);
  692. debug_object_activate(obj, &descr_type_test);
  693. return 1;
  694. default:
  695. return 0;
  696. }
  697. }
  698. /*
  699. * fixup_destroy is called when:
  700. * - an active object is destroyed
  701. */
  702. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  703. {
  704. struct self_test *obj = addr;
  705. switch (state) {
  706. case ODEBUG_STATE_ACTIVE:
  707. debug_object_deactivate(obj, &descr_type_test);
  708. debug_object_destroy(obj, &descr_type_test);
  709. return 1;
  710. default:
  711. return 0;
  712. }
  713. }
  714. /*
  715. * fixup_free is called when:
  716. * - an active object is freed
  717. */
  718. static int __init fixup_free(void *addr, enum debug_obj_state state)
  719. {
  720. struct self_test *obj = addr;
  721. switch (state) {
  722. case ODEBUG_STATE_ACTIVE:
  723. debug_object_deactivate(obj, &descr_type_test);
  724. debug_object_free(obj, &descr_type_test);
  725. return 1;
  726. default:
  727. return 0;
  728. }
  729. }
/*
 * Selftest helper: verify that @addr is in @state and that the fixup
 * and warning counters match the expected values. Returns 0 on
 * success; on any mismatch it WARNs, disables the tracker and returns
 * -EINVAL.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means "no tracker object expected". */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wiring the selftest fixup callbacks together. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest exercises. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Walk the test object through the whole state machine — including the
 * illegal transitions — and verify states, fixup counts and warning
 * counts after every step. Runs with interrupts disabled; descr_test
 * is set so debug_print_object() stays quiet for expected warnings.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Legal transitions: init -> activate -> deactivate -> destroy. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must trigger a fixup and a warning. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object only warn, no fixup. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of an unknown, statically initialized object. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that still holds an active object. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the global counters so the test is invisible afterwards. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  837. #else
  838. static inline void debug_objects_selftest(void) { }
  839. #endif
  840. /*
  841. * Called during early boot to initialize the hash buckets and link
  842. * the static object pool objects into the poll list. After this call
  843. * the object tracker is fully operational.
  844. */
  845. void __init debug_objects_early_init(void)
  846. {
  847. int i;
  848. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  849. raw_spin_lock_init(&obj_hash[i].lock);
  850. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  851. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  852. }
/*
 * Convert the statically allocated objects to dynamic ones:
 * pre-allocate one slab object per static pool slot, swap the pool
 * over, and copy every active tracker object into a dynamic one.
 * Returns 0 on success, -ENOMEM when the pre-allocation fails.
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Pre-allocate the replacements before touching any shared state. */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Allocation failed: release everything allocated so far. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
  903. /*
  904. * Called after the kmem_caches are functional to setup a dedicated
  905. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  906. * prevents that the debug code is called on kmem_cache_free() for the
  907. * debug tracker objects to avoid recursive calls.
  908. */
  909. void __init debug_objects_mem_init(void)
  910. {
  911. if (!debug_objects_enabled)
  912. return;
  913. obj_cache = kmem_cache_create("debug_objects_cache",
  914. sizeof (struct debug_obj), 0,
  915. SLAB_DEBUG_OBJECTS, NULL);
  916. if (!obj_cache || debug_objects_replace_static_objects()) {
  917. debug_objects_enabled = 0;
  918. if (obj_cache)
  919. kmem_cache_destroy(obj_cache);
  920. printk(KERN_WARNING "ODEBUG: out of memory.\n");
  921. } else
  922. debug_objects_selftest();
  923. }