debugobjects.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/seq_file.h>
  13. #include <linux/debugfs.h>
  14. #include <linux/hash.h>
  15. #define ODEBUG_HASH_BITS 14
  16. #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
  17. #define ODEBUG_POOL_SIZE 512
  18. #define ODEBUG_POOL_MIN_LEVEL 256
  19. #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
  20. #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
  21. #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * One hash bucket: the list of tracked objects whose addresses hash to
 * this slot, plus the lock that protects the list.
 */
struct debug_bucket {
	struct hlist_head	list;	/* tracked debug_obj entries */
	spinlock_t		lock;	/* protects 'list' */
};
/* Hash table of buckets; objects are hashed by address chunk, see get_bucket(). */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static bootstrap pool, used until the slab cache is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the pool counters below. */
static DEFINE_SPINLOCK(pool_lock);

/* Free list of tracker objects. */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;	/* low-water mark */
static int			obj_pool_free = ODEBUG_POOL_SIZE;	/* current free count */
static int			obj_pool_used;				/* currently tracking */
static int			obj_pool_max_used;			/* high-water mark */

/* Slab cache for tracker objects; NULL until debug_objects_mem_init(). */
static struct kmem_cache	*obj_cache;

/* Statistics exposed via debugfs "stats". */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;

static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set while the selftest runs so its expected warnings are suppressed. */
static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Boot parameter "debug_objects": force-enable the object debugger. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter "no_debug_objects": force-disable the object debugger. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human-readable names for enum debug_obj_state, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
/*
 * Refill the free pool up to ODEBUG_POOL_MIN_LEVEL from the slab cache.
 * GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN because callers may run in
 * atomic context and a failed refill is tolerable.  Returns the (racy,
 * advisory) current free count.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Before the cache exists only the static bootstrap pool is usable. */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	/* obj_pool_free is read unlocked; the check is merely a heuristic. */
	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}
/*
 * Lookup an object in the hash bucket.  Caller must hold b->lock.
 * Also tracks the longest chain walked, for the debugfs statistics.
 * Returns the tracker object or NULL when the address is not tracked.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes a tracker from the free pool, initializes it for 'addr' and
 * links it into bucket 'b'.  Updates the pool usage statistics.
 * Returns NULL when the pool is exhausted (caller disables debugging).
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back down to ODEBUG_POOL_SIZE, returning surplus
 * trackers to the slab cache one at a time.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		spin_lock_irqsave(&pool_lock, flags);
	}
	spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	spin_unlock_irqrestore(&pool_lock, flags);
	/* Kick the shrinker outside the lock to avoid self-deadlock. */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 *
 * Flush every bucket back into the free pool; the tracker is disabled
 * by the caller (alloc_object() returned NULL).
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock, free it outside. */
		spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  194. /*
  195. * We use the pfn of the address for the hash. That way we can check
  196. * for freed objects simply by checking the affected bucket.
  197. */
  198. static struct debug_bucket *get_bucket(unsigned long addr)
  199. {
  200. unsigned long hash;
  201. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  202. return &obj_hash[hash];
  203. }
/*
 * Emit a warning about a state violation.  Printing is rate-limited to
 * five warnings, and suppressed entirely for the selftest descriptor,
 * but the warning counter is bumped unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}
  214. /*
  215. * Try to repair the damage, so we have a better chance to get useful
  216. * debug output.
  217. */
  218. static void
  219. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  220. void * addr, enum debug_obj_state state)
  221. {
  222. if (fixup)
  223. debug_objects_fixups += fixup(addr, state);
  224. }
/*
 * Verify that the on-stack annotation matches reality: 'onstack' is what
 * the init caller claimed, object_is_on_stack() is what the address says.
 * Warns (rate-limited to five) on a mismatch.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Core of debug_object_init()/debug_object_init_on_stack(): track the
 * object, run the state machine for an "init" event and trigger a fixup
 * when an active object is re-initialized.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable tracking and flush. */
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* init on a live object: warn and let the type code fix it. */
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
  284. /**
  285. * debug_object_init - debug checks when an object is initialized
  286. * @addr: address of the object
  287. * @descr: pointer to an object specific debug description structure
  288. */
  289. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  290. {
  291. if (!debug_objects_enabled)
  292. return;
  293. __debug_object_init(addr, descr, 0);
  294. }
  295. /**
  296. * debug_object_init_on_stack - debug checks when an object on stack is
  297. * initialized
  298. * @addr: address of the object
  299. * @descr: pointer to an object specific debug description structure
  300. */
  301. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  302. {
  303. if (!debug_objects_enabled)
  304. return;
  305. __debug_object_init(addr, descr, 1);
  306. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Valid transitions: INIT/INACTIVE -> ACTIVE.  Activating an already
 * active or destroyed object warns; an unknown object is handed to the
 * fixup_activate callback, since it may be statically initialized.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* double activation: fixup runs unlocked. */
			debug_print_object(obj, "activate");
			state = obj->state;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Valid transitions: INIT/INACTIVE/ACTIVE -> INACTIVE.  Deactivating a
 * destroyed or untracked object only emits a warning.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked: build a throwaway object just for the warning. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Valid transitions: NONE/INIT/INACTIVE -> DESTROYED.  Destroying an
 * active object warns and invokes fixup_destroy.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* fixup runs without the bucket lock held. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Freeing an active object warns and invokes fixup_free; any other
 * state simply drops the tracker back into the pool.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Unhash and recycle the tracker; free_object() needs no bucket lock. */
		hlist_del(&obj->node);
		spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
  462. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan a memory range that is being freed for leftover tracked objects.
 * The range is walked chunk by chunk (one bucket per chunk); an active
 * object triggers fixup_free with the bucket lock dropped and the whole
 * bucket is rescanned afterwards, any other tracked object is unhashed
 * and returned to the pool.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Bucket may hold objects outside the freed range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * Drop the lock for the fixup, then rescan:
				 * the chain may have changed meanwhile.
				 */
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  513. void debug_check_no_obj_freed(const void *address, unsigned long size)
  514. {
  515. if (debug_objects_enabled)
  516. __debug_check_no_obj_freed(address, size);
  517. }
  518. #endif
  519. #ifdef CONFIG_DEBUG_FS
/* seq_file show callback: dump the tracker statistics for debugfs "stats". */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
/* open() hook: wire debug_stats_show() into the single-record seq_file API. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for the debugfs "stats" file (read-only seq_file). */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create /sys/kernel/debug/debug_objects/stats.  Skipped when the
 * debugger is disabled; the directory is removed again on failure.
 */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
  559. #else
  560. static inline void debug_objects_init_debugfs(void) { }
  561. #endif
  562. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
  563. /* Random data structure for the self test */
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* simulates a statically initialized object */
	unsigned long	dummy2[3];
};

/* Forward declaration: the fixup callbacks below reference it. */
static __initdata struct debug_obj_descr descr_type_test;
/*
 * fixup_init is called when:
 * - an active object is initialized
 *
 * Deactivates then re-initializes the object; returns 1 so the fixup
 * counter reflects the repair.
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* static_init marks a (simulated) statically initialized object. */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merily call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 *
 * Deactivates the object first, then retries the destroy.
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 *
 * Deactivates the object first, then retries the free.
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
/*
 * Selftest helper: verify that the object at 'addr' is in the expected
 * state and that the fixup/warning counters match.  On any mismatch the
 * debugger is disabled and -EINVAL returned; 0 on success.
 */
static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* STATE_NONE means the object must no longer be tracked. */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	if (res)
		debug_objects_enabled = 0;
	spin_unlock_irqrestore(&db->lock, flags);
	return res;
}
/* Descriptor and test object used by the selftest only (__initdata). */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Boot-time selftest: walk the object through its legal and illegal
 * state transitions and verify states, fixup counts and warning counts
 * after every step.  Runs with interrupts disabled; counters are
 * restored on exit so the test itself leaves no trace in the stats.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Suppress warning output for our own descriptor. */
	descr_test = &descr_type_test;

	/* Legal path: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;

	/* Legal path: deactivate -> destroy. */
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Every operation on a destroyed object must only warn. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;

	/* Freeing removes the tracker entirely. */
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a "static" object must be fixed up via init. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	/* Re-init of the now-active object: warning plus fixup. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that still holds an active object must warn + fix. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Hide the selftest's counter churn from the debugfs statistics. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  757. #else
  758. static inline void debug_objects_selftest(void) { }
  759. #endif
  760. /*
  761. * Called during early boot to initialize the hash buckets and link
  762. * the static object pool objects into the poll list. After this call
  763. * the object tracker is fully operational.
  764. */
  765. void __init debug_objects_early_init(void)
  766. {
  767. int i;
  768. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  769. spin_lock_init(&obj_hash[i].lock);
  770. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  771. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  772. }
/*
 * Convert the statically allocated objects to dynamic ones:
 *
 * Pre-allocates one slab object per static pool slot, then swaps the
 * static trackers (free pool entries and active hash entries) for the
 * dynamic ones.  Returns 0 on success, -ENOMEM if allocation fails.
 */
static int debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Allocation failed: release whatever we managed to allocate. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	/* On any failure, disable the tracker rather than run half set up. */
	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}