debugobjects.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
/* Hash table geometry: 2^14 buckets, indexed by the chunk of the object address. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Static tracker-object pool size and the level below which fill_pool() refills. */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Object addresses are hashed per page-sized chunk (see get_bucket()). */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: a list of tracked objects protected by its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Boot-time pool, used until obj_cache exists; discarded after init (__initdata). */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

/* Pool statistics, exported via the debugfs "stats" file. */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* Dedicated slab cache for tracker objects (SLAB_DEBUG_OBJECTS, no recursion). */
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor currently under selftest; debug_print_object() suppresses it. */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/* Boot parameter "debug_objects": force-enable the tracker. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter "no_debug_objects": force-disable the tracker. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human-readable names for enum debug_obj_state, used in warning output. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
/*
 * Refill the free pool from the slab cache until it holds at least
 * ODEBUG_POOL_MIN_LEVEL entries. Allocates with GFP_ATOMIC (noretry,
 * nowarn) because the debug hooks may run in any context. A NULL
 * obj_cache means the cache is not set up yet; the static pool is all
 * there is, so bail out.
 */
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	/*
	 * obj_pool_free is read without pool_lock here; the refill is
	 * best effort and a racy read only means an extra or skipped
	 * allocation attempt.
	 */
	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		/* pool_lock protects the list head and the free counter. */
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
  84. /*
  85. * Lookup an object in the hash bucket.
  86. */
  87. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  88. {
  89. struct debug_obj *obj;
  90. int cnt = 0;
  91. hlist_for_each_entry(obj, &b->list, node) {
  92. cnt++;
  93. if (obj->object == addr)
  94. return obj;
  95. }
  96. if (cnt > debug_objects_maxchain)
  97. debug_objects_maxchain = cnt;
  98. return NULL;
  99. }
  100. /*
  101. * Allocate a new object. If the pool is empty, switch off the debugger.
  102. * Must be called with interrupts disabled.
  103. */
/*
 * Take a tracker object from the free pool, initialize it for @addr and
 * link it into bucket @b. Returns NULL when the pool is empty; the
 * caller then disables object debugging entirely.
 *
 * Must be called with interrupts disabled (takes pool_lock without
 * saving flags) and with b->lock held (it adds to b->list).
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Track usage high-water and free low-water marks for stats. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
  127. /*
  128. * workqueue function to free objects.
  129. */
/*
 * Workqueue function: shrink the free pool back down to
 * ODEBUG_POOL_SIZE by returning surplus objects to the slab cache.
 * Scheduled from free_object() when the pool has grown past the limit.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock. The object is already
		 * unlinked, so dropping the lock here is safe; the
		 * loop condition is re-evaluated after re-acquiring it.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
  149. /*
  150. * Put the object back into the pool and schedule work to free objects
  151. * if necessary.
  152. */
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * Schedule the shrink work only when the pool is over-full and
	 * the slab cache exists; keventd_up() guards against scheduling
	 * before the workqueue infrastructure is running, and
	 * work_pending() avoids redundant scheduling.
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* schedule_work() outside pool_lock to keep the hold time short. */
	if (sched)
		schedule_work(&debug_obj_work);
}
  171. /*
  172. * We run out of memory. That means we probably have tons of objects
  173. * allocated.
  174. */
/*
 * We ran out of tracker objects (pool empty, allocation failed). Flush
 * every hash bucket back into the pool. The caller has already cleared
 * debug_objects_enabled, so the tracker is effectively shut down.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the bucket lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* ... then free the entries without holding it. */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
  195. /*
  196. * We use the pfn of the address for the hash. That way we can check
  197. * for freed objects simply by checking the affected bucket.
  198. */
  199. static struct debug_bucket *get_bucket(unsigned long addr)
  200. {
  201. unsigned long hash;
  202. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  203. return &obj_hash[hash];
  204. }
/*
 * Emit a warning about a state-machine violation. Output is limited to
 * five reports per boot, and reports for the selftest descriptor
 * (descr_test) are suppressed since the selftest triggers violations on
 * purpose. debug_objects_warnings counts every violation regardless.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional per-type hint (e.g. the object's callback). */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  220. /*
  221. * Try to repair the damage, so we have a better chance to get useful
  222. * debug output.
  223. */
  224. static int
  225. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  226. void * addr, enum debug_obj_state state)
  227. {
  228. int fixed = 0;
  229. if (fixup)
  230. fixed = fixup(addr, state);
  231. debug_objects_fixups += fixed;
  232. return fixed;
  233. }
/*
 * Cross-check the caller's claim about where the object lives: warn
 * when a stack object was initialized via debug_object_init() or a
 * non-stack object via debug_object_init_on_stack(). Limited to five
 * reports per boot.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
/*
 * Common implementation for debug_object_init() and
 * debug_object_init_on_stack(): look up (or allocate) the tracker
 * object for @addr and drive its state machine for the "init" event.
 * @onstack tells debug_object_is_on_stack() which placement the caller
 * claimed.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: shut the tracker down entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-init of an active object: warn, then let the type fix it. */
		debug_print_object(obj, "init");
		state = obj->state;
		/* Drop the lock before calling out to the fixup callback. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  293. /**
  294. * debug_object_init - debug checks when an object is initialized
  295. * @addr: address of the object
  296. * @descr: pointer to an object specific debug description structure
  297. */
  298. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  299. {
  300. if (!debug_objects_enabled)
  301. return;
  302. __debug_object_init(addr, descr, 0);
  303. }
  304. /**
  305. * debug_object_init_on_stack - debug checks when an object on stack is
  306. * initialized
  307. * @addr: address of the object
  308. * @descr: pointer to an object specific debug description structure
  309. */
  310. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  311. {
  312. if (!debug_objects_enabled)
  313. return;
  314. __debug_object_init(addr, descr, 1);
  315. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Transitions INIT/INACTIVE -> ACTIVE. Warns on double activation or
 * activation of a destroyed object. An unknown object may be a
 * statically initialized one; the type's fixup_activate callback
 * decides.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* Stand-in object so a warning can be printed when none is tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Drop the lock before calling the fixup callback. */
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not. If the fixup did not handle it (returned
	 * non-zero without registering the object), warn.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Transitions INIT/INACTIVE/ACTIVE -> INACTIVE, but only when no
 * extended active state (obj->astate) is pending. Warns on deactivation
 * of destroyed or untracked objects.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A non-zero astate means a usage state is still set. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn via a stand-in descriptor. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Transitions NONE/INIT/INACTIVE -> DESTROYED. Warns when an active
 * object is destroyed (and lets fixup_destroy repair it) or when the
 * object was already destroyed. Untracked objects are ignored.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		/* Drop the lock before calling the fixup callback. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Removes the tracker entry and returns it to the pool. Freeing an
 * ACTIVE object is a violation: warn and give fixup_free a chance to
 * repair it instead of dropping the entry.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		/* Drop the lock before calling the fixup callback. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Unlink under the bucket lock, free outside of it. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Verifies that @addr is a tracked object. An unknown object may be a
 * statically initialized one; fixup_assert_init decides, and a warning
 * is printed when the fixup reports a violation.
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in object for the potential warning below. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 *
 * Compare-and-advance on the object's extended active state (astate).
 * Only valid for ACTIVE objects; anything else — wrong astate, wrong
 * state, or an untracked object — produces a warning.
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: warn via a stand-in descriptor. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  548. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan the freed memory range [address, address + size) for tracked
 * objects. Active objects are reported and handed to fixup_free; all
 * other matching entries are unlinked and returned to the pool. The
 * scan walks every address chunk covered by the range, since objects
 * are hashed by chunk (see get_bucket()).
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

		/*
		 * Restart point: calling fixup_free requires dropping the
		 * bucket lock, which invalidates the chain walk.
		 */
repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Bucket may hold objects outside the freed range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  599. void debug_check_no_obj_freed(const void *address, unsigned long size)
  600. {
  601. if (debug_objects_enabled)
  602. __debug_check_no_obj_freed(address, size);
  603. }
  604. #endif
  605. #ifdef CONFIG_DEBUG_FS
  606. static int debug_stats_show(struct seq_file *m, void *v)
  607. {
  608. seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
  609. seq_printf(m, "warnings :%d\n", debug_objects_warnings);
  610. seq_printf(m, "fixups :%d\n", debug_objects_fixups);
  611. seq_printf(m, "pool_free :%d\n", obj_pool_free);
  612. seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
  613. seq_printf(m, "pool_used :%d\n", obj_pool_used);
  614. seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
  615. return 0;
  616. }
/* Open callback: bind debug_stats_show() via the single-shot seq_file API. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for the read-only debugfs "stats" file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  627. static int __init debug_objects_init_debugfs(void)
  628. {
  629. struct dentry *dbgdir, *dbgstats;
  630. if (!debug_objects_enabled)
  631. return 0;
  632. dbgdir = debugfs_create_dir("debug_objects", NULL);
  633. if (!dbgdir)
  634. return -ENOMEM;
  635. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  636. &debug_stats_fops);
  637. if (!dbgstats)
  638. goto err;
  639. return 0;
  640. err:
  641. debugfs_remove(dbgdir);
  642. return -ENOMEM;
  643. }
  644. __initcall(debug_objects_init_debugfs);
#else
/* No debugfs: statistics are simply not exported. */
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* simulates static initialization when 1 */
	unsigned long	dummy2[3];
};

/* Forward declaration; the descriptor is defined after its callbacks. */
static __initdata struct debug_obj_descr descr_type_test;
  656. /*
  657. * fixup_init is called when:
  658. * - an active object is initialized
  659. */
  660. static int __init fixup_init(void *addr, enum debug_obj_state state)
  661. {
  662. struct self_test *obj = addr;
  663. switch (state) {
  664. case ODEBUG_STATE_ACTIVE:
  665. debug_object_deactivate(obj, &descr_type_test);
  666. debug_object_init(obj, &descr_type_test);
  667. return 1;
  668. default:
  669. return 0;
  670. }
  671. }
  672. /*
  673. * fixup_activate is called when:
  674. * - an active object is activated
  675. * - an unknown object is activated (might be a statically initialized object)
  676. */
  677. static int __init fixup_activate(void *addr, enum debug_obj_state state)
  678. {
  679. struct self_test *obj = addr;
  680. switch (state) {
  681. case ODEBUG_STATE_NOTAVAILABLE:
  682. if (obj->static_init == 1) {
  683. debug_object_init(obj, &descr_type_test);
  684. debug_object_activate(obj, &descr_type_test);
  685. return 0;
  686. }
  687. return 1;
  688. case ODEBUG_STATE_ACTIVE:
  689. debug_object_deactivate(obj, &descr_type_test);
  690. debug_object_activate(obj, &descr_type_test);
  691. return 1;
  692. default:
  693. return 0;
  694. }
  695. }
  696. /*
  697. * fixup_destroy is called when:
  698. * - an active object is destroyed
  699. */
  700. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  701. {
  702. struct self_test *obj = addr;
  703. switch (state) {
  704. case ODEBUG_STATE_ACTIVE:
  705. debug_object_deactivate(obj, &descr_type_test);
  706. debug_object_destroy(obj, &descr_type_test);
  707. return 1;
  708. default:
  709. return 0;
  710. }
  711. }
  712. /*
  713. * fixup_free is called when:
  714. * - an active object is freed
  715. */
  716. static int __init fixup_free(void *addr, enum debug_obj_state state)
  717. {
  718. struct self_test *obj = addr;
  719. switch (state) {
  720. case ODEBUG_STATE_ACTIVE:
  721. debug_object_deactivate(obj, &descr_type_test);
  722. debug_object_free(obj, &descr_type_test);
  723. return 1;
  724. default:
  725. return 0;
  726. }
  727. }
/*
 * Selftest helper: verify that the tracker entry for @addr is in
 * @state (ODEBUG_STATE_NONE means "not tracked at all") and that the
 * global fixup/warning counters match the expected values. On any
 * mismatch, warn, disable object debugging and return -EINVAL;
 * returns 0 on success.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	/* Any selftest failure disables the tracker for this boot. */
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wiring the selftest fixup callbacks together. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The single test object driven through the state machine below. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Drive one object through the whole state machine, checking after
 * every transition that the tracked state and the fixup/warning
 * counters are exactly as expected. Runs with interrupts disabled so
 * no concurrent debug-object activity can disturb the counters. The
 * counters are restored afterwards so the selftest leaves no trace in
 * the exported statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Suppress the rate-limited console warnings for our descriptor. */
	descr_test = &descr_type_test;

	/* Normal lifecycle: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Double activation: one warning, fixed by fixup_activate. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Operations on a destroyed object: warnings, no fixups. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of an unknown, statically initialized object. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Init of an active object: warning plus fixup_init repair. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that contains an active object. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters and re-enable console warnings. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
/* Selftest compiled out. */
static inline void debug_objects_selftest(void) { }
#endif
  838. /*
  839. * Called during early boot to initialize the hash buckets and link
  840. * the static object pool objects into the poll list. After this call
  841. * the object tracker is fully operational.
  842. */
  843. void __init debug_objects_early_init(void)
  844. {
  845. int i;
  846. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  847. raw_spin_lock_init(&obj_hash[i].lock);
  848. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  849. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  850. }
/*
 * Convert the statically allocated objects to dynamic ones:
 * pre-allocate one slab object per static pool slot, swap the free
 * pool over, and copy every live tracker entry in the hash table into
 * a dynamic object. Returns 0 on success, -ENOMEM when the
 * pre-allocation fails (nothing has been swapped at that point).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Pre-allocate the replacements before touching any shared state. */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Take a dynamic object from the freshly filled pool. */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;
free:
	/* Undo the partial pre-allocation. */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
  901. /*
  902. * Called after the kmem_caches are functional to setup a dedicated
  903. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  904. * prevents that the debug code is called on kmem_cache_free() for the
  905. * debug tracker objects to avoid recursive calls.
  906. */
  907. void __init debug_objects_mem_init(void)
  908. {
  909. if (!debug_objects_enabled)
  910. return;
  911. obj_cache = kmem_cache_create("debug_objects_cache",
  912. sizeof (struct debug_obj), 0,
  913. SLAB_DEBUG_OBJECTS, NULL);
  914. if (!obj_cache || debug_objects_replace_static_objects()) {
  915. debug_objects_enabled = 0;
  916. if (obj_cache)
  917. kmem_cache_destroy(obj_cache);
  918. printk(KERN_WARNING "ODEBUG: out of memory.\n");
  919. } else
  920. debug_objects_selftest();
  921. }