debugobjects.c

/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
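
/*
 * Typical usage (illustrative sketch, not part of the original file): a
 * subsystem that wants lifetime checking declares a debug_obj_descr and
 * calls the debug_object_*() hooks from its own init/activate/free paths.
 * The "foo" names below are made up for this example.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 */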
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
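
/*
 * Objects are hashed by the ODEBUG_CHUNK_SIZE (page sized) chunk of their
 * address, so each hash bucket covers one chunk of memory. The free pool
 * is refilled by fill_pool() when it drops below ODEBUG_POOL_MIN_LEVEL and
 * trimmed back to ODEBUG_POOL_SIZE by the free_obj_work() callback.
 */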
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
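
/*
 * Object life cycle as tracked by this code: NONE -> INIT -> ACTIVE, with
 * ACTIVE <-> INACTIVE transitions while the object is in use, and DESTROYED
 * as a terminal state until the object is freed. NOTAVAILABLE describes an
 * address that is not (yet) tracked, e.g. a statically initialized object.
 */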
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	int fixed = 0;

	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
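
/*
 * Illustrative use of the astate check above (names are hypothetical, not
 * from this file): a caller that distinguishes a "queued" from a "running"
 * sub-state of an active object could verify and advance it with:
 *
 *	debug_object_active_state(obj, &foo_debug_descr,
 *				  FOO_STATE_QUEUED, FOO_STATE_RUNNING);
 */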
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which avoids recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}