/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
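
/*
 * Human readable names of the tracked object states. The checks in the
 * debug_object_* calls below enforce the legal transitions:
 * none -> initialized -> active <-> inactive, with "destroyed" reachable
 * from any non-active state. Everything else is reported and, where
 * possible, handed to the type specific fixup callbacks.
 */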
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
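
/*
 * Fill the pool of tracker objects from the slab cache up to the
 * minimum fill level. Uses GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN
 * so it can run from any context and gives up silently when memory is
 * tight; the pool then simply stays below ODEBUG_POOL_MIN_LEVEL.
 */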
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
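
/*
 * Note: free_object() only returns the tracker object to the pool; the
 * actual kmem_cache_free() happens later, in process context, via
 * debug_obj_work. This keeps the free path usable from atomic contexts
 * without calling into the allocator.
 */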

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
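
/*
 * Worked example, assuming 4K pages (ODEBUG_CHUNK_SHIFT == 12): the
 * addresses 0xffff880012345040 and 0xffff880012345ff0 share the chunk
 * number 0xffff880012345 and therefore always hash to the same bucket,
 * so freeing that page only requires scanning the buckets of the
 * chunks it covers.
 */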

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		     obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}
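
/*
 * Check whether the on-stack annotation of an object matches where its
 * address actually lives and warn (rate limited) about a mismatch.
 */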
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
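
/*
 * Typical usage of the API above, sketched for clarity. The descriptor,
 * the "my_work" type and the my_work_fixup_free() helper are
 * hypothetical names, not definitions from this file:
 *
 *	static struct debug_obj_descr my_work_debug_descr = {
 *		.name		= "my_work",
 *		.fixup_free	= my_work_fixup_free,
 *	};
 *
 *	void my_work_init(struct my_work *w)
 *	{
 *		debug_object_init(w, &my_work_debug_descr);
 *	}
 *
 *	void my_work_queue(struct my_work *w)
 *	{
 *		debug_object_activate(w, &my_work_debug_descr);
 *	}
 *
 * An object living on the stack must be initialized with
 * debug_object_init_on_stack() instead, so that the check in
 * debug_object_is_on_stack() does not trigger.
 */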

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
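
/*
 * Note: the memory allocators are expected to call
 * debug_check_no_obj_freed() on every region they release (the slab
 * and vmalloc free paths do this in mainline), so that freeing memory
 * which still contains an active tracked object is reported and handed
 * to the fixup_free callback.
 */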

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}

#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which would lead to recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}