/* debugobjects.c */
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #include <linux/debugobjects.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/sched.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/slab.h>
  16. #include <linux/hash.h>
  17. #define ODEBUG_HASH_BITS 14
  18. #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
  19. #define ODEBUG_POOL_SIZE 512
  20. #define ODEBUG_POOL_MIN_LEVEL 256
  21. #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
  22. #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
  23. #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * One hash bucket: chain of tracked objects plus the lock protecting it.
 */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/* Hash table of buckets; index derived from the tracked object's chunk. */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static tracker pool used before the slab allocator is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Free list of preallocated tracker objects. */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;	/* low-water mark */
static int			obj_pool_free = ODEBUG_POOL_SIZE;	/* current free count */
static int			obj_pool_used;				/* trackers in the hash */
static int			obj_pool_max_used;			/* high-water mark */

/* Slab cache for trackers; NULL until debug_objects_mem_init() runs. */
static struct kmem_cache	*obj_cache;

/* Statistics shown via the debugfs stats file. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;

static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor used by the selftest; its warnings are not printed. */
static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
  45. static int __init enable_object_debug(char *str)
  46. {
  47. debug_objects_enabled = 1;
  48. return 0;
  49. }
  50. static int __init disable_object_debug(char *str)
  51. {
  52. debug_objects_enabled = 0;
  53. return 0;
  54. }
/* Command line switches, handled by the two __init functions above. */
early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

/* Human readable names for the debug_obj state machine, used in warnings. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  65. static int fill_pool(void)
  66. {
  67. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  68. struct debug_obj *new;
  69. unsigned long flags;
  70. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  71. return obj_pool_free;
  72. if (unlikely(!obj_cache))
  73. return obj_pool_free;
  74. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  75. new = kmem_cache_zalloc(obj_cache, gfp);
  76. if (!new)
  77. return obj_pool_free;
  78. raw_spin_lock_irqsave(&pool_lock, flags);
  79. hlist_add_head(&new->node, &obj_pool);
  80. obj_pool_free++;
  81. raw_spin_unlock_irqrestore(&pool_lock, flags);
  82. }
  83. return obj_pool_free;
  84. }
  85. /*
  86. * Lookup an object in the hash bucket.
  87. */
  88. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  89. {
  90. struct hlist_node *node;
  91. struct debug_obj *obj;
  92. int cnt = 0;
  93. hlist_for_each_entry(obj, node, &b->list, node) {
  94. cnt++;
  95. if (obj->object == addr)
  96. return obj;
  97. }
  98. if (cnt > debug_objects_maxchain)
  99. debug_objects_maxchain = cnt;
  100. return NULL;
  101. }
  102. /*
  103. * Allocate a new object. If the pool is empty, switch off the debugger.
  104. * Must be called with interrupts disabled.
  105. */
  106. static struct debug_obj *
  107. alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  108. {
  109. struct debug_obj *obj = NULL;
  110. raw_spin_lock(&pool_lock);
  111. if (obj_pool.first) {
  112. obj = hlist_entry(obj_pool.first, typeof(*obj), node);
  113. obj->object = addr;
  114. obj->descr = descr;
  115. obj->state = ODEBUG_STATE_NONE;
  116. obj->astate = 0;
  117. hlist_del(&obj->node);
  118. hlist_add_head(&obj->node, &b->list);
  119. obj_pool_used++;
  120. if (obj_pool_used > obj_pool_max_used)
  121. obj_pool_max_used = obj_pool_used;
  122. obj_pool_free--;
  123. if (obj_pool_free < obj_pool_min_free)
  124. obj_pool_min_free = obj_pool_free;
  125. }
  126. raw_spin_unlock(&pool_lock);
  127. return obj;
  128. }
/*
 * workqueue function to free objects.
 *
 * Shrinks the pool back to ODEBUG_POOL_SIZE by handing surplus
 * trackers back to the slab cache, one at a time.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		/* Detach the first free object and account for it. */
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		/* free_obj_work() shrinks the pool back to its limit. */
		schedule_work(&debug_obj_work);
}
  173. /*
  174. * We run out of memory. That means we probably have tons of objects
  175. * allocated.
  176. */
  177. static void debug_objects_oom(void)
  178. {
  179. struct debug_bucket *db = obj_hash;
  180. struct hlist_node *node, *tmp;
  181. HLIST_HEAD(freelist);
  182. struct debug_obj *obj;
  183. unsigned long flags;
  184. int i;
  185. printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
  186. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  187. raw_spin_lock_irqsave(&db->lock, flags);
  188. hlist_move_list(&db->list, &freelist);
  189. raw_spin_unlock_irqrestore(&db->lock, flags);
  190. /* Now free them */
  191. hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
  192. hlist_del(&obj->node);
  193. free_object(obj);
  194. }
  195. }
  196. }
  197. /*
  198. * We use the pfn of the address for the hash. That way we can check
  199. * for freed objects simply by checking the affected bucket.
  200. */
  201. static struct debug_bucket *get_bucket(unsigned long addr)
  202. {
  203. unsigned long hash;
  204. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  205. return &obj_hash[hash];
  206. }
/*
 * Report a state machine violation. Printing is rate limited to five
 * reports and suppressed for the selftest descriptor (the selftest
 * provokes violations on purpose), but the warning counter is bumped
 * unconditionally so check_results() can still verify it.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
			 "object type: %s\n",
			 msg, obj_states[obj->state], obj->astate,
			 obj->descr->name);
	}
	debug_objects_warnings++;
}
  219. /*
  220. * Try to repair the damage, so we have a better chance to get useful
  221. * debug output.
  222. */
  223. static void
  224. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  225. void * addr, enum debug_obj_state state)
  226. {
  227. if (fixup)
  228. debug_objects_fixups += fixup(addr, state);
  229. }
  230. static void debug_object_is_on_stack(void *addr, int onstack)
  231. {
  232. int is_on_stack;
  233. static int limit;
  234. if (limit > 4)
  235. return;
  236. is_on_stack = object_is_on_stack(addr);
  237. if (is_on_stack == onstack)
  238. return;
  239. limit++;
  240. if (is_on_stack)
  241. printk(KERN_WARNING
  242. "ODEBUG: object is on stack, but not annotated\n");
  243. else
  244. printk(KERN_WARNING
  245. "ODEBUG: object is not on stack, but annotated\n");
  246. WARN_ON(1);
  247. }
/*
 * Core of debug_object_init(): start tracking @addr and move its state
 * machine to ODEBUG_STATE_INIT. @onstack is the caller's claim about
 * whether the object lives on a kernel stack, verified against reality.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Keep the tracker pool topped up while allocation is possible. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch the whole facility off. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-init of an active object: warn and try to repair. */
		debug_print_object(obj, "init");
		state = obj->state;
		/* Drop the lock: the fixup callback may call back into us. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  289. /**
  290. * debug_object_init - debug checks when an object is initialized
  291. * @addr: address of the object
  292. * @descr: pointer to an object specific debug description structure
  293. */
  294. void debug_object_init(void *addr, struct debug_obj_descr *descr)
  295. {
  296. if (!debug_objects_enabled)
  297. return;
  298. __debug_object_init(addr, descr, 0);
  299. }
  300. /**
  301. * debug_object_init_on_stack - debug checks when an object on stack is
  302. * initialized
  303. * @addr: address of the object
  304. * @descr: pointer to an object specific debug description structure
  305. */
  306. void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
  307. {
  308. if (!debug_objects_enabled)
  309. return;
  310. __debug_object_init(addr, descr, 1);
  311. }
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 *
 * Valid transition: INIT/INACTIVE -> ACTIVE. Activating an untracked
 * object is handed to fixup_activate(), which decides whether it is a
 * legitimate statically initialized object.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn, then try to repair. */
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Drop the lock: the fixup may re-enter the API. */
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 *
 * Valid transition: INIT/INACTIVE/ACTIVE -> INACTIVE, but only when no
 * extended active state (obj->astate) is still pending.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: report it via a synthetic debug_obj. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 *
 * Valid transition: NONE/INIT/INACTIVE -> DESTROYED. Destroying an
 * ACTIVE object or destroying twice is reported.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		/* Drop the lock: the fixup may re-enter the API. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 *
 * Freeing an ACTIVE object is a bug: warn and give fixup_free() a
 * chance to repair it. Otherwise the tracker is unhashed and recycled.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		/* Drop the lock: the fixup may re-enter the API. */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Unhash the tracker and hand it back to the pool. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 *
 * Extended state machine for ACTIVE objects: moves obj->astate from
 * @expect to @next, warning on a mismatch or when the object is not
 * ACTIVE or not tracked at all.
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: report it via a synthetic debug_obj. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  509. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Walk all hash buckets covering the freed memory area [address,
 * address + size) and remove every tracker whose object lies inside
 * it. Active objects are reported and handed to fixup_free() first;
 * the bucket scan is then restarted from scratch because the lock was
 * dropped for the callback.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of ODEBUG_CHUNK_SIZE chunks overlapping the area. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* The bucket can hold objects outside the area. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				/* Collect for freeing outside the lock. */
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  560. void debug_check_no_obj_freed(const void *address, unsigned long size)
  561. {
  562. if (debug_objects_enabled)
  563. __debug_check_no_obj_freed(address, size);
  564. }
  565. #endif
  566. #ifdef CONFIG_DEBUG_FS
  567. static int debug_stats_show(struct seq_file *m, void *v)
  568. {
  569. seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
  570. seq_printf(m, "warnings :%d\n", debug_objects_warnings);
  571. seq_printf(m, "fixups :%d\n", debug_objects_fixups);
  572. seq_printf(m, "pool_free :%d\n", obj_pool_free);
  573. seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
  574. seq_printf(m, "pool_used :%d\n", obj_pool_used);
  575. seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
  576. return 0;
  577. }
/* debugfs ->open(): route reads through seq_file's single_open(). */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for the debug_objects/stats debugfs file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  588. static int __init debug_objects_init_debugfs(void)
  589. {
  590. struct dentry *dbgdir, *dbgstats;
  591. if (!debug_objects_enabled)
  592. return 0;
  593. dbgdir = debugfs_create_dir("debug_objects", NULL);
  594. if (!dbgdir)
  595. return -ENOMEM;
  596. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  597. &debug_stats_fops);
  598. if (!dbgstats)
  599. goto err;
  600. return 0;
  601. err:
  602. debugfs_remove(dbgdir);
  603. return -ENOMEM;
  604. }
  605. __initcall(debug_objects_init_debugfs);
  606. #else
  607. static inline void debug_objects_init_debugfs(void) { }
  608. #endif
  609. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	/* Set to 1 by the selftest to mimic a statically initialized
	 * object; checked by fixup_activate(). */
	int		static_init;
	unsigned long	dummy2[3];
};

/* Forward declaration; defined below after the fixup callbacks. */
static __initdata struct debug_obj_descr descr_type_test;
  617. /*
  618. * fixup_init is called when:
  619. * - an active object is initialized
  620. */
  621. static int __init fixup_init(void *addr, enum debug_obj_state state)
  622. {
  623. struct self_test *obj = addr;
  624. switch (state) {
  625. case ODEBUG_STATE_ACTIVE:
  626. debug_object_deactivate(obj, &descr_type_test);
  627. debug_object_init(obj, &descr_type_test);
  628. return 1;
  629. default:
  630. return 0;
  631. }
  632. }
  633. /*
  634. * fixup_activate is called when:
  635. * - an active object is activated
  636. * - an unknown object is activated (might be a statically initialized object)
  637. */
  638. static int __init fixup_activate(void *addr, enum debug_obj_state state)
  639. {
  640. struct self_test *obj = addr;
  641. switch (state) {
  642. case ODEBUG_STATE_NOTAVAILABLE:
  643. if (obj->static_init == 1) {
  644. debug_object_init(obj, &descr_type_test);
  645. debug_object_activate(obj, &descr_type_test);
  646. /*
  647. * Real code should return 0 here ! This is
  648. * not a fixup of some bad behaviour. We
  649. * merily call the debug_init function to keep
  650. * track of the object.
  651. */
  652. return 1;
  653. } else {
  654. /* Real code needs to emit a warning here */
  655. }
  656. return 0;
  657. case ODEBUG_STATE_ACTIVE:
  658. debug_object_deactivate(obj, &descr_type_test);
  659. debug_object_activate(obj, &descr_type_test);
  660. return 1;
  661. default:
  662. return 0;
  663. }
  664. }
  665. /*
  666. * fixup_destroy is called when:
  667. * - an active object is destroyed
  668. */
  669. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  670. {
  671. struct self_test *obj = addr;
  672. switch (state) {
  673. case ODEBUG_STATE_ACTIVE:
  674. debug_object_deactivate(obj, &descr_type_test);
  675. debug_object_destroy(obj, &descr_type_test);
  676. return 1;
  677. default:
  678. return 0;
  679. }
  680. }
  681. /*
  682. * fixup_free is called when:
  683. * - an active object is freed
  684. */
  685. static int __init fixup_free(void *addr, enum debug_obj_state state)
  686. {
  687. struct self_test *obj = addr;
  688. switch (state) {
  689. case ODEBUG_STATE_ACTIVE:
  690. debug_object_deactivate(obj, &descr_type_test);
  691. debug_object_free(obj, &descr_type_test);
  692. return 1;
  693. default:
  694. return 0;
  695. }
  696. }
/*
 * Verify that the selftest object is in the expected @state and that
 * the fixup/warning counters match @fixups/@warnings. On any mismatch
 * the object debugger is disabled, as further selftest steps would
 * only produce noise. Returns 0 on success, -EINVAL otherwise.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Descriptor wiring the selftest fixup callbacks together. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest drives through the state machine. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Drive the selftest object through the whole state machine, checking
 * state and counters after every step. Runs with interrupts disabled;
 * the fixup/warning counters are restored at the end so the test does
 * not pollute the debugfs statistics.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Normal lifecycle: init, then activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must warn and be fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	/* Operations on a destroyed object must warn and stay refused. */
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;
	/* Activation of a static (untracked) object is fixed up. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;
#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing the memory of an active object must warn and fix up. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");
out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  804. #else
  805. static inline void debug_objects_selftest(void) { }
  806. #endif
  807. /*
  808. * Called during early boot to initialize the hash buckets and link
  809. * the static object pool objects into the poll list. After this call
  810. * the object tracker is fully operational.
  811. */
  812. void __init debug_objects_early_init(void)
  813. {
  814. int i;
  815. for (i = 0; i < ODEBUG_HASH_SIZE; i++)
  816. raw_spin_lock_init(&obj_hash[i].lock);
  817. for (i = 0; i < ODEBUG_POOL_SIZE; i++)
  818. hlist_add_head(&obj_static_pool[i].node, &obj_pool);
  819. }
/*
 * Convert the statically allocated objects to dynamic ones:
 *
 * Allocates a slab object for every slot of the static pool, swaps the
 * pool contents and re-links every tracker currently in the hash to a
 * dynamic replacement. On allocation failure everything allocated so
 * far is rolled back and -ENOMEM is returned.
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Roll back: release everything allocated so far. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
  870. /*
  871. * Called after the kmem_caches are functional to setup a dedicated
  872. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  873. * prevents that the debug code is called on kmem_cache_free() for the
  874. * debug tracker objects to avoid recursive calls.
  875. */
  876. void __init debug_objects_mem_init(void)
  877. {
  878. if (!debug_objects_enabled)
  879. return;
  880. obj_cache = kmem_cache_create("debug_objects_cache",
  881. sizeof (struct debug_obj), 0,
  882. SLAB_DEBUG_OBJECTS, NULL);
  883. if (!obj_cache || debug_objects_replace_static_objects()) {
  884. debug_objects_enabled = 0;
  885. if (obj_cache)
  886. kmem_cache_destroy(obj_cache);
  887. printk(KERN_WARNING "ODEBUG: out of memory.\n");
  888. } else
  889. debug_objects_selftest();
  890. }