debugobjects.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
/* Hash table geometry: 2^14 buckets, indexed by address chunk. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Tracker object pool size and the watermark below which we refill. */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Addresses are hashed at page-sized chunk granularity (see get_bucket()). */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: list of tracked objects plus its protecting lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Boot-time pool, used before the slab allocator is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics exported via debugfs (see debug_stats_show()). */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set during the selftest so its descriptor's warnings are suppressed. */
static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Kernel command line "debug_objects": force-enable object tracking. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Kernel command line "no_debug_objects": disable object tracking. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human readable names for the debug_obj states, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
/*
 * Refill the tracker object pool up to ODEBUG_POOL_MIN_LEVEL.
 *
 * Uses GFP_ATOMIC since callers may not be able to sleep;
 * __GFP_NORETRY | __GFP_NOWARN keep a failing atomic allocation cheap
 * and quiet. Returns the current pool fill level. The unlocked reads
 * of obj_pool_free are only a heuristic; the list update itself is
 * done under pool_lock.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Before the slab cache exists only the static pool can be used. */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}
/*
 * Lookup an object in the hash bucket. Caller must hold b->lock.
 * Returns the tracker for @addr or NULL if the object is not tracked.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	/* Track the longest chain walked, for the debugfs statistics. */
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * On success the tracker is initialized for @addr/@descr and moved
 * from the pool onto bucket @b's list; returns NULL when the pool is
 * empty. Also maintains the pool usage high/low watermarks.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back down to ODEBUG_POOL_SIZE from process context;
 * one object is unhooked per lock hold so pool_lock is never held
 * across kmem_cache_free().
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized. keventd_up() avoids queueing work before the
	 * workqueue infrastructure is functional during early boot.
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* Queue outside the lock; free_obj_work() takes pool_lock itself. */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Drain every hash bucket and return all trackers to the pool. The
 * caller has already cleared debug_objects_enabled, so no new objects
 * are tracked while we tear down.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock, free it outside. */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  197. /*
  198. * We use the pfn of the address for the hash. That way we can check
  199. * for freed objects simply by checking the affected bucket.
  200. */
  201. static struct debug_bucket *get_bucket(unsigned long addr)
  202. {
  203. unsigned long hash;
  204. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  205. return &obj_hash[hash];
  206. }
/*
 * Emit a warning about a state machine violation. Output is capped at
 * five reports to avoid log flooding and suppressed entirely for the
 * selftest descriptor; the warning counter always feeds the debugfs
 * statistics.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Ask the type specific code for an identifying hint. */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
			"object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  222. /*
  223. * Try to repair the damage, so we have a better chance to get useful
  224. * debug output.
  225. */
  226. static void
  227. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  228. void * addr, enum debug_obj_state state)
  229. {
  230. if (fixup)
  231. debug_objects_fixups += fixup(addr, state);
  232. }
/*
 * Verify that the annotation (init vs. init_on_stack) matches where
 * the object actually lives. Reports at most five mismatches.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	/* Annotation and reality agree: nothing to report. */
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Core of debug_object_init()/debug_object_init_on_stack().
 *
 * Looks up (or allocates) the tracker for @addr and moves it to
 * ODEBUG_STATE_INIT. Initializing an ACTIVE object is a bug: it is
 * reported and handed to the type specific fixup_init callback.
 * @onstack says whether the caller annotated the object as on-stack,
 * which is cross-checked against its real location.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Keep the tracker pool topped up. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: tracking is switched off entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		/* Drop the lock: the fixup may call back into this code. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  292. /**
  293. * debug_object_init - debug checks when an object is initialized
  294. * @addr: address of the object
  295. * @descr: pointer to an object specific debug description structure
  296. */
  297. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  298. {
  299. if (!debug_objects_enabled)
  300. return;
  301. __debug_object_init(addr, descr, 0);
  302. }
  303. /**
  304. * debug_object_init_on_stack - debug checks when an object on stack is
  305. * initialized
  306. * @addr: address of the object
  307. * @descr: pointer to an object specific debug description structure
  308. */
  309. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  310. {
  311. if (!debug_objects_enabled)
  312. return;
  313. __debug_object_init(addr, descr, 1);
  314. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Activating an already active object is a bug. */
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Unlock before the fixup may re-enter this code. */
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending extra usage state blocks deactivation. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report it through a stand-in tracker. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object is a bug: report and fix up. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object is a bug: report and fix up. */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Stop tracking: unhash and return the tracker to the pool. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the extra usage state only on a match. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report it through a stand-in tracker. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  512. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan every address chunk covered by [address, address+size) for
 * tracked objects. Active objects are reported and handed to their
 * fixup_free callback (restarting the bucket scan, since the lock was
 * dropped); all others are simply removed from tracking.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of chunks the freed region spans, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* The bucket may contain objects outside the range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  563. void debug_check_no_obj_freed(const void *address, unsigned long size)
  564. {
  565. if (debug_objects_enabled)
  566. __debug_check_no_obj_freed(address, size);
  567. }
  568. #endif
  569. #ifdef CONFIG_DEBUG_FS
  570. static int debug_stats_show(struct seq_file *m, void *v)
  571. {
  572. seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
  573. seq_printf(m, "warnings :%d\n", debug_objects_warnings);
  574. seq_printf(m, "fixups :%d\n", debug_objects_fixups);
  575. seq_printf(m, "pool_free :%d\n", obj_pool_free);
  576. seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
  577. seq_printf(m, "pool_used :%d\n", obj_pool_used);
  578. seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
  579. return 0;
  580. }
/* debugfs open: route reads through debug_stats_show() via seq_file. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for <debugfs>/debug_objects/stats. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  591. static int __init debug_objects_init_debugfs(void)
  592. {
  593. struct dentry *dbgdir, *dbgstats;
  594. if (!debug_objects_enabled)
  595. return 0;
  596. dbgdir = debugfs_create_dir("debug_objects", NULL);
  597. if (!dbgdir)
  598. return -ENOMEM;
  599. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  600. &debug_stats_fops);
  601. if (!dbgstats)
  602. goto err;
  603. return 0;
  604. err:
  605. debugfs_remove(dbgdir);
  606. return -ENOMEM;
  607. }
  608. __initcall(debug_objects_init_debugfs);
  609. #else
  610. static inline void debug_objects_init_debugfs(void) { }
  611. #endif
  612. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* set when "statically initialized" */
	unsigned long	dummy2[3];
};

/* Forward declaration; the fixup callbacks below reference it. */
static __initdata struct debug_obj_descr descr_type_test;
  620. /*
  621. * fixup_init is called when:
  622. * - an active object is initialized
  623. */
  624. static int __init fixup_init(void *addr, enum debug_obj_state state)
  625. {
  626. struct self_test *obj = addr;
  627. switch (state) {
  628. case ODEBUG_STATE_ACTIVE:
  629. debug_object_deactivate(obj, &descr_type_test);
  630. debug_object_init(obj, &descr_type_test);
  631. return 1;
  632. default:
  633. return 0;
  634. }
  635. }
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/* Deactivate first, then retry the activation. */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
  668. /*
  669. * fixup_destroy is called when:
  670. * - an active object is destroyed
  671. */
  672. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  673. {
  674. struct self_test *obj = addr;
  675. switch (state) {
  676. case ODEBUG_STATE_ACTIVE:
  677. debug_object_deactivate(obj, &descr_type_test);
  678. debug_object_destroy(obj, &descr_type_test);
  679. return 1;
  680. default:
  681. return 0;
  682. }
  683. }
  684. /*
  685. * fixup_free is called when:
  686. * - an active object is freed
  687. */
  688. static int __init fixup_free(void *addr, enum debug_obj_state state)
  689. {
  690. struct self_test *obj = addr;
  691. switch (state) {
  692. case ODEBUG_STATE_ACTIVE:
  693. debug_object_deactivate(obj, &descr_type_test);
  694. debug_object_free(obj, &descr_type_test);
  695. return 1;
  696. default:
  697. return 0;
  698. }
  699. }
/*
 * Selftest helper: verify that the tracker for @addr is in @state and
 * that the global fixup/warning counters match the expected values.
 * On any mismatch a warning is emitted, object debugging is disabled
 * and -EINVAL is returned; returns 0 on success.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* A failing selftest disables object debugging entirely. */
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Object type descriptor used by the selftest. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The (static) object the selftest operates on. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Exercise the object state machine end to end. Runs with interrupts
 * disabled; the expected fixup/warning counter deltas are checked
 * after every transition and restored on exit.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Suppress warning output for our own descriptor. */
	descr_test = &descr_type_test;

	/* The normal life cycle: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Activating an active object must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Any operation on a destroyed object must warn. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activating an untracked static object goes via fixup_activate. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory holding an active object must warn and fix up. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters and re-enable normal warning output. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  807. #else
  808. static inline void debug_objects_selftest(void) { }
  809. #endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 * preallocate one slab object per static pool slot, swap the pool
 * contents, then rewrite every active tracker in the hash table so it
 * points at a dynamic object. Returns 0 on success, -ENOMEM when the
 * preallocation fails (static pool is left untouched).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Allocation failed: release what we got; keep the static pool. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		/* Cache or replacement failed: disable tracking entirely. */
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}