/* debugobjects.c */
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
  17. #define ODEBUG_HASH_BITS 14
  18. #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
  19. #define ODEBUG_POOL_SIZE 512
  20. #define ODEBUG_POOL_MIN_LEVEL 256
  21. #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
  22. #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
  23. #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * Hash bucket: hlist of tracked objects that hash to this slot,
 * protected by a per-bucket raw spinlock.
 */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/* Hash table of buckets; objects are hashed by chunk address (see get_bucket()) */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used before the slab allocator is up; replaced later */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Free list of tracker objects */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;	/* low-water mark */
static int			obj_pool_free = ODEBUG_POOL_SIZE;	/* currently free */
static int			obj_pool_used;				/* currently in use */
static int			obj_pool_max_used;			/* high-water mark */
static struct kmem_cache	*obj_cache;

/* Statistics, exported via debugfs */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;

static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor under selftest; debug_print_object() suppresses warnings for it */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Boot parameter "debug_objects": force-enable object debugging */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter "no_debug_objects": force-disable object debugging */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human-readable names for enum debug_obj_state, used in warnings */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  65. static int fill_pool(void)
  66. {
  67. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  68. struct debug_obj *new;
  69. unsigned long flags;
  70. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  71. return obj_pool_free;
  72. if (unlikely(!obj_cache))
  73. return obj_pool_free;
  74. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  75. new = kmem_cache_zalloc(obj_cache, gfp);
  76. if (!new)
  77. return obj_pool_free;
  78. raw_spin_lock_irqsave(&pool_lock, flags);
  79. hlist_add_head(&new->node, &obj_pool);
  80. obj_pool_free++;
  81. raw_spin_unlock_irqrestore(&pool_lock, flags);
  82. }
  83. return obj_pool_free;
  84. }
  85. /*
  86. * Lookup an object in the hash bucket.
  87. */
  88. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  89. {
  90. struct hlist_node *node;
  91. struct debug_obj *obj;
  92. int cnt = 0;
  93. hlist_for_each_entry(obj, node, &b->list, node) {
  94. cnt++;
  95. if (obj->object == addr)
  96. return obj;
  97. }
  98. if (cnt > debug_objects_maxchain)
  99. debug_objects_maxchain = cnt;
  100. return NULL;
  101. }
  102. /*
  103. * Allocate a new object. If the pool is empty, switch off the debugger.
  104. * Must be called with interrupts disabled.
  105. */
  106. static struct debug_obj *
  107. alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  108. {
  109. struct debug_obj *obj = NULL;
  110. raw_spin_lock(&pool_lock);
  111. if (obj_pool.first) {
  112. obj = hlist_entry(obj_pool.first, typeof(*obj), node);
  113. obj->object = addr;
  114. obj->descr = descr;
  115. obj->state = ODEBUG_STATE_NONE;
  116. obj->astate = 0;
  117. hlist_del(&obj->node);
  118. hlist_add_head(&obj->node, &b->list);
  119. obj_pool_used++;
  120. if (obj_pool_used > obj_pool_max_used)
  121. obj_pool_max_used = obj_pool_used;
  122. obj_pool_free--;
  123. if (obj_pool_free < obj_pool_min_free)
  124. obj_pool_min_free = obj_pool_free;
  125. }
  126. raw_spin_unlock(&pool_lock);
  127. return obj;
  128. }
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back to ODEBUG_POOL_SIZE by handing surplus
 * objects back to the slab cache.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/* Re-evaluated each iteration since the lock is dropped below */
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		/* keventd_up() guards against scheduling before workqueues exist */
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* schedule_work() outside the lock to avoid lock ordering issues */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Drain every hash bucket and return all tracker objects to the pool.
 * The caller has already disabled the debugger.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock, free it outside */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  197. /*
  198. * We use the pfn of the address for the hash. That way we can check
  199. * for freed objects simply by checking the affected bucket.
  200. */
  201. static struct debug_bucket *get_bucket(unsigned long addr)
  202. {
  203. unsigned long hash;
  204. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  205. return &obj_hash[hash];
  206. }
/*
 * Emit a rate-limited warning describing a state violation on @obj.
 * Warnings for the selftest descriptor are suppressed, but always
 * counted in debug_objects_warnings.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;	/* at most 5 WARNs over system lifetime */

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	/* counted even when the WARN above is rate-limited or suppressed */
	debug_objects_warnings++;
}
  222. /*
  223. * Try to repair the damage, so we have a better chance to get useful
  224. * debug output.
  225. */
  226. static int
  227. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  228. void * addr, enum debug_obj_state state)
  229. {
  230. int fixed = 0;
  231. if (fixup)
  232. fixed = fixup(addr, state);
  233. debug_objects_fixups += fixed;
  234. return fixed;
  235. }
/*
 * Warn (rate-limited) when the stack-ness of @addr does not match the
 * @onstack annotation the caller used (debug_object_init vs
 * debug_object_init_on_stack).
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;	/* at most 5 warnings */

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	/* Annotation matches reality: nothing to report */
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Core of debug_object_init()/debug_object_init_on_stack().
 *
 * Looks up (or allocates) the tracker for @addr and moves it into
 * ODEBUG_STATE_INIT. @onstack tells us which annotation the caller
 * used so the stack check can complain about mismatches. If the pool
 * is exhausted the whole facility is disabled.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically top up the pool before taking locks */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable debugging and drain */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* init of an active object: report, then run fixup unlocked */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  295. /**
  296. * debug_object_init - debug checks when an object is initialized
  297. * @addr: address of the object
  298. * @descr: pointer to an object specific debug description structure
  299. */
  300. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  301. {
  302. if (!debug_objects_enabled)
  303. return;
  304. __debug_object_init(addr, descr, 0);
  305. }
  306. /**
  307. * debug_object_init_on_stack - debug checks when an object on stack is
  308. * initialized
  309. * @addr: address of the object
  310. * @descr: pointer to an object specific debug description structure
  311. */
  312. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  313. {
  314. if (!debug_objects_enabled)
  315. return;
  316. __debug_object_init(addr, descr, 1);
  317. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* On-stack dummy used to report on an address we are not tracking */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: report, run fixup unlocked */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A non-zero astate means a pending async operation */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Untracked objects are silently ignored here */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: report, run fixup unlocked */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* Untracked objects are silently ignored here */
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: report, run fixup unlocked */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Drop the tracker; the object is gone */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* astate transition only valid from @expect */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  519. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan all hash buckets covering the freed memory range [address,
 * address + size) and drop (or fix up) any trackers whose object lies
 * inside it. Active objects trigger a warning plus fixup_free.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round down to the chunk start, round chunk count up */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Tracker for an object outside the freed range */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * Fixup runs unlocked; the chain may have
				 * changed meanwhile, so rescan the bucket.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  570. void debug_check_no_obj_freed(const void *address, unsigned long size)
  571. {
  572. if (debug_objects_enabled)
  573. __debug_check_no_obj_freed(address, size);
  574. }
  575. #endif
  576. #ifdef CONFIG_DEBUG_FS
/* seq_file show callback for /sys/kernel/debug/debug_objects/stats */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
/* open callback: wire the stats file up to the seq_file machinery */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* file_operations for the read-only debugfs stats file */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create /sys/kernel/debug/debug_objects/stats. Returns 0 on success
 * or when debugging is disabled, -ENOMEM on debugfs failure.
 */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	/* Remove the directory again if the stats file could not be made */
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
  616. #else
  617. static inline void debug_objects_init_debugfs(void) { }
  618. #endif
  619. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* simulates static initialization */
	unsigned long	dummy2[3];
};

/* Defined below; forward declaration for the fixup callbacks */
static __initdata struct debug_obj_descr descr_type_test;
  627. /*
  628. * fixup_init is called when:
  629. * - an active object is initialized
  630. */
  631. static int __init fixup_init(void *addr, enum debug_obj_state state)
  632. {
  633. struct self_test *obj = addr;
  634. switch (state) {
  635. case ODEBUG_STATE_ACTIVE:
  636. debug_object_deactivate(obj, &descr_type_test);
  637. debug_object_init(obj, &descr_type_test);
  638. return 1;
  639. default:
  640. return 0;
  641. }
  642. }
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* static_init flags the object as statically initialized */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/* Double activation: deactivate, then activate again */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
  675. /*
  676. * fixup_destroy is called when:
  677. * - an active object is destroyed
  678. */
  679. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  680. {
  681. struct self_test *obj = addr;
  682. switch (state) {
  683. case ODEBUG_STATE_ACTIVE:
  684. debug_object_deactivate(obj, &descr_type_test);
  685. debug_object_destroy(obj, &descr_type_test);
  686. return 1;
  687. default:
  688. return 0;
  689. }
  690. }
  691. /*
  692. * fixup_free is called when:
  693. * - an active object is freed
  694. */
  695. static int __init fixup_free(void *addr, enum debug_obj_state state)
  696. {
  697. struct self_test *obj = addr;
  698. switch (state) {
  699. case ODEBUG_STATE_ACTIVE:
  700. debug_object_deactivate(obj, &descr_type_test);
  701. debug_object_free(obj, &descr_type_test);
  702. return 1;
  703. default:
  704. return 0;
  705. }
  706. }
/*
 * Verify one selftest step: the tracker for @addr must be in @state,
 * and the global fixup/warning counters must match @fixups/@warnings.
 * On mismatch, WARN and disable object debugging. Returns 0 on
 * success, -EINVAL on failure.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* No tracker is only acceptable when we expect STATE_NONE */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* Any selftest failure disables the whole facility */
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wired to the selftest fixup callbacks above */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest exercises; static, but not "static_init" yet */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Boot-time selftest: drive the test object through the state machine
 * and check tracker state plus fixup/warning counters after each step.
 * Runs with interrupts disabled; restores the counters on exit.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Setting descr_test suppresses the selftest's WARN spam */
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Operations on a destroyed object must each warn */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a static (untracked) object must be fixed up */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory containing an active object must warn + fix up */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest doesn't skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  814. #else
  815. static inline void debug_objects_selftest(void) { }
  816. #endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 *
 * Allocates a full pool from the slab cache, swaps it in for the
 * __initdata static pool, and copies any active trackers over to
 * dynamic objects. Returns 0 on success, -ENOMEM if allocation fails
 * (in which case nothing has been swapped).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Allocate the replacement pool up front so we can fail cleanly */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			/* Take a dynamic object from the pool head ... */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			/* ... and put it into the bucket in place of the old one */
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Allocation failed: release whatever we already got */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	/* Static pool swap failure means we cannot continue safely */
	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}