/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

/* Hash table size and fill levels of the preallocated tracking object pool */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Addresses are hashed by the page-sized chunk they fall into */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: list of tracking objects plus the lock protecting it */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used during early boot, before the slab allocator is up */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

/* Pool accounting; all counters are protected by pool_lock */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics exposed through the debugfs "stats" file */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor of the object type currently under selftest, if any */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/*
 * Command line hooks: "debug_objects" / "no_debug_objects" override the
 * compile-time default of debug_objects_enabled.
 */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human readable state names, indexed by enum debug_obj_state */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  65. static int fill_pool(void)
  66. {
  67. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  68. struct debug_obj *new;
  69. unsigned long flags;
  70. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  71. return obj_pool_free;
  72. if (unlikely(!obj_cache))
  73. return obj_pool_free;
  74. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  75. new = kmem_cache_zalloc(obj_cache, gfp);
  76. if (!new)
  77. return obj_pool_free;
  78. raw_spin_lock_irqsave(&pool_lock, flags);
  79. hlist_add_head(&new->node, &obj_pool);
  80. obj_pool_free++;
  81. raw_spin_unlock_irqrestore(&pool_lock, flags);
  82. }
  83. return obj_pool_free;
  84. }
  85. /*
  86. * Lookup an object in the hash bucket.
  87. */
  88. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  89. {
  90. struct hlist_node *node;
  91. struct debug_obj *obj;
  92. int cnt = 0;
  93. hlist_for_each_entry(obj, node, &b->list, node) {
  94. cnt++;
  95. if (obj->object == addr)
  96. return obj;
  97. }
  98. if (cnt > debug_objects_maxchain)
  99. debug_objects_maxchain = cnt;
  100. return NULL;
  101. }
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes the first object off the free pool, fills in its tracking data
 * and hooks it into bucket @b. Returns NULL when the pool is exhausted.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Pool high/low water marks, protected by pool_lock */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back down to ODEBUG_POOL_SIZE, freeing one surplus
 * object per lock section.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* Kick the shrink worker outside of pool_lock */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Empties every hash bucket and returns all tracking objects to the
 * pool. The caller has already disabled object debugging.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole bucket list under the lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  196. /*
  197. * We use the pfn of the address for the hash. That way we can check
  198. * for freed objects simply by checking the affected bucket.
  199. */
  200. static struct debug_bucket *get_bucket(unsigned long addr)
  201. {
  202. unsigned long hash;
  203. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  204. return &obj_hash[hash];
  205. }
/*
 * Report a state violation for @obj. The WARN output is rate limited to
 * five reports per boot; the warning counter is bumped unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	/* Objects of the type currently under selftest are never reported */
	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}
  216. /*
  217. * Try to repair the damage, so we have a better chance to get useful
  218. * debug output.
  219. */
  220. static void
  221. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  222. void * addr, enum debug_obj_state state)
  223. {
  224. if (fixup)
  225. debug_objects_fixups += fixup(addr, state);
  226. }
  227. static void debug_object_is_on_stack(void *addr, int onstack)
  228. {
  229. int is_on_stack;
  230. static int limit;
  231. if (limit > 4)
  232. return;
  233. is_on_stack = object_is_on_stack(addr);
  234. if (is_on_stack == onstack)
  235. return;
  236. limit++;
  237. if (is_on_stack)
  238. printk(KERN_WARNING
  239. "ODEBUG: object is on stack, but not annotated\n");
  240. else
  241. printk(KERN_WARNING
  242. "ODEBUG: object is not on stack, but annotated\n");
  243. WARN_ON(1);
  244. }
/*
 * Core of debug_object_init(): track @addr as initialized.
 *
 * Allocates a tracking object on first sight, verifies the on-stack
 * annotation @onstack and performs the state transition. Initializing
 * an ACTIVE object is a bug and is handed to the type's fixup_init
 * callback.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically refill the pool before taking the bucket lock */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: object debugging is switched off */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		/* Drop the lock before calling back into the type's code */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  286. /**
  287. * debug_object_init - debug checks when an object is initialized
  288. * @addr: address of the object
  289. * @descr: pointer to an object specific debug description structure
  290. */
  291. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  292. {
  293. if (!debug_objects_enabled)
  294. return;
  295. __debug_object_init(addr, descr, 0);
  296. }
  297. /**
  298. * debug_object_init_on_stack - debug checks when an object on stack is
  299. * initialized
  300. * @addr: address of the object
  301. * @descr: pointer to an object specific debug description structure
  302. */
  303. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  304. {
  305. if (!debug_objects_enabled)
  306. return;
  307. __debug_object_init(addr, descr, 1);
  308. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Activating an already ACTIVE object is a bug and is handed to the
 * type's fixup_activate callback. An unknown object might be statically
 * initialized; the callback is invoked with ODEBUG_STATE_NOTAVAILABLE
 * to let the type specific code decide.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Call the fixup callback without the bucket lock */
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/*
		 * Deactivating an untracked object: report it via an
		 * on-stack dummy object in NOTAVAILABLE state.
		 */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Destroying an ACTIVE object is a bug and is handed to the type's
 * fixup_destroy callback.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		/* Call the fixup callback without the bucket lock */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Freeing an ACTIVE object is a bug and is handed to the type's
 * fixup_free callback. In all other states the tracking object is
 * removed from the bucket and returned to the pool.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		/* Call the fixup callback without the bucket lock */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Untrack the object and give it back to the pool */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the memory range [address, address + size) for tracked objects
 * and untrack them. ACTIVE objects in the range are reported and handed
 * to the type's fixup_free callback first.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Walk the range chunk wise; each chunk maps to one hash bucket */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * The fixup callback must run without the
				 * bucket lock; restart the bucket scan
				 * afterwards as the list may have changed.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  515. void debug_check_no_obj_freed(const void *address, unsigned long size)
  516. {
  517. if (debug_objects_enabled)
  518. __debug_check_no_obj_freed(address, size);
  519. }
  520. #endif
#ifdef CONFIG_DEBUG_FS
/* seq_file show callback: dump the tracker statistics (debugfs "stats") */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
/* open callback: single_open() renders the whole dump in one read */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for the debugfs "stats" file */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create /sys/kernel/debug/debug_objects/stats at initcall time */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	/* Remove the directory again when the file could not be created */
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* set to emulate a statically initialized object */
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* Deactivate first, then redo the init - one fixup */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* Deactivate first, then redo the destroy - one fixup */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* Deactivate first, then redo the free - one fixup */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * Verify that the object at @addr is in @state and that the global
 * fixup/warning counters match the expected values. On any mismatch
 * object debugging is disabled to avoid follow-on noise; returns 0 on
 * success, -EINVAL otherwise.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wiring the selftest fixup callbacks together */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest exercises; static storage on purpose */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Exercise the object tracker state machine with a known sequence of
 * operations and verify states plus fixup/warning counters after each
 * step. Runs with interrupts disabled; the counters are restored
 * afterwards so the selftest does not skew the debugfs statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;

	/* Operations on a destroyed object must warn and leave it DESTROYED */
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;

	/* Freeing removes the tracking object */
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a static object goes through fixup_activate */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing the memory of an active object must untrack it */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
  762. /*
  763. * Called during early boot to initialize the hash buckets and link
  764. * the static object pool objects into the poll list. After this call
  765. * the object tracker is fully operational.
  766. */
  767. void __init debug_objects_early_init(void)
  768. {
  769. int i;
  770. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  771. raw_spin_lock_init(&obj_hash[i].lock);
  772. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  773. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  774. }
/*
 * Convert the statically allocated objects to dynamic ones:
 *
 * Preallocates one slab object per static pool slot, swaps the pool
 * contents and copies every tracking object currently sitting in a
 * hash bucket into a dynamic replacement. Returns 0 on success,
 * -ENOMEM when the preallocation fails.
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Preallocate one replacement per static pool slot */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			/* Take a fresh object from the now-dynamic pool */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Preallocation failed: give back what we already allocated */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		/* Cache creation or static replacement failed: turn off */
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}