debugobjects.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Tracker object pool sizing: refilled when it drops below the min level */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects are hashed by the memory chunk (page) their address falls into */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: a list of tracked objects protected by its own lock */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used before the slab allocator is up (see early_init) */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics exposed via the debugfs stats file */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor used by the selftest; warnings for it are suppressed */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Boot parameter "debug_objects": switch the tracker on */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
/* Boot parameter "no_debug_objects": switch the tracker off */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human readable names for the object states, indexed by enum value */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  65. static int fill_pool(void)
  66. {
  67. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  68. struct debug_obj *new;
  69. unsigned long flags;
  70. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  71. return obj_pool_free;
  72. if (unlikely(!obj_cache))
  73. return obj_pool_free;
  74. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  75. new = kmem_cache_zalloc(obj_cache, gfp);
  76. if (!new)
  77. return obj_pool_free;
  78. raw_spin_lock_irqsave(&pool_lock, flags);
  79. hlist_add_head(&new->node, &obj_pool);
  80. obj_pool_free++;
  81. raw_spin_unlock_irqrestore(&pool_lock, flags);
  82. }
  83. return obj_pool_free;
  84. }
/*
 * Lookup an object in the hash bucket. Returns the tracker object for
 * @addr or NULL if it is not tracked. Caller holds b->lock.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	/* Record the longest chain walked, for the debugfs statistics */
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		/* Move the entry from the free pool into the hash bucket */
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Maintain the usage high-watermark ... */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		/* ... and the free low-watermark statistics */
		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
/*
 * workqueue function to free objects. Shrinks the free pool back down
 * to ODEBUG_POOL_SIZE entries.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		/* keventd may not be up yet during early boot */
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* Kick the shrinker outside of pool_lock */
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated. Drop all tracked objects back into the pool.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole bucket with the lock held ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  197. /*
  198. * We use the pfn of the address for the hash. That way we can check
  199. * for freed objects simply by checking the affected bucket.
  200. */
  201. static struct debug_bucket *get_bucket(unsigned long addr)
  202. {
  203. unsigned long hash;
  204. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  205. return &obj_hash[hash];
  206. }
/*
 * Emit a rate-limited warning about an object state violation. Warnings
 * are capped at 5 and suppressed for the selftest descriptor, which
 * triggers violations on purpose. The warning counter always advances.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional per-type hint (e.g. the callback address) */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  222. /*
  223. * Try to repair the damage, so we have a better chance to get useful
  224. * debug output.
  225. */
  226. static int
  227. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  228. void * addr, enum debug_obj_state state)
  229. {
  230. int fixed = 0;
  231. if (fixup)
  232. fixed = fixup(addr, state);
  233. debug_objects_fixups += fixed;
  234. return fixed;
  235. }
  236. static void debug_object_is_on_stack(void *addr, int onstack)
  237. {
  238. int is_on_stack;
  239. static int limit;
  240. if (limit > 4)
  241. return;
  242. is_on_stack = object_is_on_stack(addr);
  243. if (is_on_stack == onstack)
  244. return;
  245. limit++;
  246. if (is_on_stack)
  247. printk(KERN_WARNING
  248. "ODEBUG: object is on stack, but not annotated\n");
  249. else
  250. printk(KERN_WARNING
  251. "ODEBUG: object is not on stack, but annotated\n");
  252. WARN_ON(1);
  253. }
/*
 * Core of debug_object_init()/debug_object_init_on_stack(): start (or
 * re-start) tracking the object at @addr and move its state machine to
 * ODEBUG_STATE_INIT. @onstack carries the caller's claim about whether
 * the object lives on the task stack, for sanity checking.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Refill the tracker pool before taking the bucket lock */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: tracking can no longer work */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Verify the on-stack annotation on first sight */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* init on an active object: warn and run the fixup */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  295. /**
  296. * debug_object_init - debug checks when an object is initialized
  297. * @addr: address of the object
  298. * @descr: pointer to an object specific debug description structure
  299. */
  300. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  301. {
  302. if (!debug_objects_enabled)
  303. return;
  304. __debug_object_init(addr, descr, 0);
  305. }
  306. /**
  307. * debug_object_init_on_stack - debug checks when an object on stack is
  308. * initialized
  309. * @addr: address of the object
  310. * @descr: pointer to an object specific debug description structure
  311. */
  312. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  313. {
  314. if (!debug_objects_enabled)
  315. return;
  316. __debug_object_init(addr, descr, 1);
  317. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* On-stack dummy used only to report on untracked objects */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn and run the fixup */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not. Report only when the fixup says it was
	 * not a legitimate static object.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A pending extra state (astate) blocks deactivation */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: warn and run the fixup */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		/* Double destroy */
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: warn and run the fixup */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any other state: stop tracking and recycle the entry */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* On-stack dummy used only for the report below */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the extra state only from @expect */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report via an on-stack dummy */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  550. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan every hash chunk overlapping [address, address + size) for
 * tracked objects whose memory is being freed. Active objects are
 * reported and fixed up; all others are silently untracked.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round down to the first chunk, then count chunks rounding up */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Skip objects outside the freed range */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/* Freeing an active object: warn + fixup */
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				/* Lock was dropped: rescan this bucket */
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  601. void debug_check_no_obj_freed(const void *address, unsigned long size)
  602. {
  603. if (debug_objects_enabled)
  604. __debug_check_no_obj_freed(address, size);
  605. }
  606. #endif
  607. #ifdef CONFIG_DEBUG_FS
/* seq_file show handler for the debugfs "stats" file */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
/* debugfs open: wire the file up to the single-record seq_file show */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for /sys/kernel/debug/debug_objects/stats */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  629. static int __init debug_objects_init_debugfs(void)
  630. {
  631. struct dentry *dbgdir, *dbgstats;
  632. if (!debug_objects_enabled)
  633. return 0;
  634. dbgdir = debugfs_create_dir("debug_objects", NULL);
  635. if (!dbgdir)
  636. return -ENOMEM;
  637. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  638. &debug_stats_fops);
  639. if (!dbgstats)
  640. goto err;
  641. return 0;
  642. err:
  643. debugfs_remove(dbgdir);
  644. return -ENOMEM;
  645. }
  646. __initcall(debug_objects_init_debugfs);
  647. #else
  648. static inline void debug_objects_init_debugfs(void) { }
  649. #endif
  650. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	/* Flag: pretend the object was statically initialized */
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;
  658. /*
  659. * fixup_init is called when:
  660. * - an active object is initialized
  661. */
  662. static int __init fixup_init(void *addr, enum debug_obj_state state)
  663. {
  664. struct self_test *obj = addr;
  665. switch (state) {
  666. case ODEBUG_STATE_ACTIVE:
  667. debug_object_deactivate(obj, &descr_type_test);
  668. debug_object_init(obj, &descr_type_test);
  669. return 1;
  670. default:
  671. return 0;
  672. }
  673. }
  674. /*
  675. * fixup_activate is called when:
  676. * - an active object is activated
  677. * - an unknown object is activated (might be a statically initialized object)
  678. */
  679. static int __init fixup_activate(void *addr, enum debug_obj_state state)
  680. {
  681. struct self_test *obj = addr;
  682. switch (state) {
  683. case ODEBUG_STATE_NOTAVAILABLE:
  684. if (obj->static_init == 1) {
  685. debug_object_init(obj, &descr_type_test);
  686. debug_object_activate(obj, &descr_type_test);
  687. /*
  688. * Real code should return 0 here ! This is
  689. * not a fixup of some bad behaviour. We
  690. * merily call the debug_init function to keep
  691. * track of the object.
  692. */
  693. return 1;
  694. } else {
  695. /* Real code needs to emit a warning here */
  696. }
  697. return 0;
  698. case ODEBUG_STATE_ACTIVE:
  699. debug_object_deactivate(obj, &descr_type_test);
  700. debug_object_activate(obj, &descr_type_test);
  701. return 1;
  702. default:
  703. return 0;
  704. }
  705. }
  706. /*
  707. * fixup_destroy is called when:
  708. * - an active object is destroyed
  709. */
  710. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  711. {
  712. struct self_test *obj = addr;
  713. switch (state) {
  714. case ODEBUG_STATE_ACTIVE:
  715. debug_object_deactivate(obj, &descr_type_test);
  716. debug_object_destroy(obj, &descr_type_test);
  717. return 1;
  718. default:
  719. return 0;
  720. }
  721. }
  722. /*
  723. * fixup_free is called when:
  724. * - an active object is freed
  725. */
  726. static int __init fixup_free(void *addr, enum debug_obj_state state)
  727. {
  728. struct self_test *obj = addr;
  729. switch (state) {
  730. case ODEBUG_STATE_ACTIVE:
  731. debug_object_deactivate(obj, &descr_type_test);
  732. debug_object_free(obj, &descr_type_test);
  733. return 1;
  734. default:
  735. return 0;
  736. }
  737. }
/*
 * Selftest helper: verify that the test object is in @state and that
 * the fixup/warning counters match @fixups/@warnings. On any mismatch
 * the whole tracker is disabled. Returns 0 on success, -EINVAL on
 * failure.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means "not tracked", so no object is fine then */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor for the selftest objects; wires up all fixup callbacks */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The selftest object; starts out as dynamically initialized */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Exercise the whole object state machine with deliberate violations
 * and verify after each step that state and fixup/warning counters are
 * as expected. Runs with interrupts off; counters are restored at the
 * end so the selftest does not skew the debugfs statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Any operation on a destroyed object must warn */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activating a static (untracked) object must be fixed up */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing the memory of an active object must warn and fix up */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the test does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  845. #else
  846. static inline void debug_objects_selftest(void) { }
  847. #endif
  848. /*
  849. * Called during early boot to initialize the hash buckets and link
  850. * the static object pool objects into the poll list. After this call
  851. * the object tracker is fully operational.
  852. */
  853. void __init debug_objects_early_init(void)
  854. {
  855. int i;
  856. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  857. raw_spin_lock_init(&obj_hash[i].lock);
  858. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  859. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  860. }
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Preallocate one dynamic object per static pool slot */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Roll back the partially built list on allocation failure */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		/* Allocation failed: disable the tracker and clean up */
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}