
/*
 * kernel/mutex-debug.c
 *
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>

#include "mutex-debug.h"
/*
 * We need a global lock when we walk through the multi-process
 * lock tree. Only used in the deadlock-debugging case.
 */
DEFINE_SPINLOCK(debug_mutex_lock);

/*
 * All locks held by all tasks, in a single global list:
 */
LIST_HEAD(debug_mutex_held_locks);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we don't want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define __IP_DECL__		, unsigned long ip
#define __IP__			, ip
#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
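
/*
 * For illustration (a sketch, not additional code): the macros let a
 * debug-only 'ip' argument be threaded through without touching the
 * nondebug build. A declaration and a call site look like:
 *
 *	void debug_mutex_set_owner(struct mutex *lock,
 *				   struct thread_info *new_owner __IP_DECL__);
 *
 *	debug_mutex_set_owner(lock, ti __RET_IP__);
 *
 * In the nondebug case these macros expand to nothing, so the extra
 * argument (and its setup cost) disappears entirely.
 */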

/*
 * "mutex debugging enabled" flag. We turn it off when we detect
 * the first problem because we don't want to recurse back
 * into the tracing code when doing error printk or
 * executing a BUG():
 */
int debug_mutex_on = 1;
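
/*
 * Illustrative sketch only (the real macro lives in mutex-debug.h):
 * turning the flag off on the first detected problem is roughly:
 *
 *	#define DEBUG_OFF()			\
 *	do {					\
 *		if (debug_mutex_on) {		\
 *			debug_mutex_on = 0;	\
 *			console_verbose();	\
 *		}				\
 *	} while (0)
 *
 * so every later debug hook becomes a no-op instead of recursing
 * back into the error-reporting path.
 */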

static void printk_task(struct task_struct *p)
{
	if (p)
		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_ti(struct thread_info *ti)
{
	if (ti)
		printk_task(ti->task);
	else
		printk("<none>");
}

static void printk_task_short(struct task_struct *p)
{
	if (p)
		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_lock(struct mutex *lock, int print_owner)
{
	printk(" [%p] {%s}\n", lock, lock->name);

	if (print_owner && lock->owner) {
		printk(".. held by: ");
		printk_ti(lock->owner);
		printk("\n");
	}
	if (lock->owner) {
		printk("... acquired at: ");
		print_symbol("%s\n", lock->acquire_ip);
	}
}

/*
 * printk locks held by a task:
 */
static void show_task_locks(struct task_struct *p)
{
	switch (p->state) {
	case TASK_RUNNING:		printk("R"); break;
	case TASK_INTERRUPTIBLE:	printk("S"); break;
	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
	case TASK_STOPPED:		printk("T"); break;
	case EXIT_ZOMBIE:		printk("Z"); break;
	case EXIT_DEAD:			printk("X"); break;
	default:			printk("?"); break;
	}
	printk_task(p);
	if (p->blocked_on) {
		struct mutex *lock = p->blocked_on->lock;

		printk(" blocked on mutex:");
		printk_lock(lock, 1);
	} else
		printk(" (not blocked on mutex)\n");
}

/*
 * printk all locks held in the system (if filter == NULL),
 * or all locks belonging to a single task (if filter != NULL):
 */
void show_held_locks(struct task_struct *filter)
{
	struct list_head *curr, *cursor = NULL;
	struct mutex *lock;
	struct thread_info *t;
	unsigned long flags;
	int count = 0;

	if (filter) {
		printk("------------------------------\n");
		printk("| showing all locks held by: | (");
		printk_task_short(filter);
		printk("):\n");
		printk("------------------------------\n");
	} else {
		printk("---------------------------\n");
		printk("| showing all locks held: |\n");
		printk("---------------------------\n");
	}

	/*
	 * Play safe and acquire the global trace lock. We
	 * cannot printk with that lock held, so we iterate
	 * very carefully: remember our position via <cursor>,
	 * drop the lock before each printout, then restart
	 * the list walk from the cursor:
	 */
next:
	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each(curr, &debug_mutex_held_locks) {
		if (cursor && curr != cursor)
			continue;
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (filter && (t != filter->thread_info))
			continue;
		count++;
		cursor = curr->next;
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("\n#%03d: ", count);
		printk_lock(lock, filter ? 0 : 1);
		goto next;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
	printk("\n");
}

void mutex_debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	printk("\nShowing all blocking locks in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	}
	if (count != 10)
		printk(" locked it.\n");

	do_each_thread(g, p) {
		show_task_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	show_held_locks(NULL);
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}

static void report_deadlock(struct task_struct *task, struct mutex *lock,
			    struct mutex *lockblk, unsigned long ip)
{
	printk("\n%s/%d is trying to acquire this lock:\n",
		current->comm, current->pid);
	printk_lock(lock, 1);
	printk("... trying at: ");
	print_symbol("%s\n", ip);
	show_held_locks(current);

	if (lockblk) {
		printk("but %s/%d is deadlocking current task %s/%d!\n\n",
			task->comm, task->pid, current->comm, current->pid);
		printk("\n%s/%d is blocked on this lock:\n",
			task->comm, task->pid);
		printk_lock(lockblk, 1);

		show_held_locks(task);

		printk("\n%s/%d's [blocked] stackdump:\n\n",
			task->comm, task->pid);
		show_stack(task, NULL);
	}

	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, current->pid);
	dump_stack();
	mutex_debug_show_all_locks();
	printk("[ turning off deadlock detection. Please report this. ]\n\n");
	local_irq_disable();
}

/*
 * Recursively check for mutex deadlocks:
 */
static int check_deadlock(struct mutex *lock, int depth,
			  struct thread_info *ti, unsigned long ip)
{
	struct mutex *lockblk;
	struct task_struct *task;

	if (!debug_mutex_on)
		return 0;

	ti = lock->owner;
	if (!ti)
		return 0;

	task = ti->task;
	lockblk = NULL;
	if (task->blocked_on)
		lockblk = task->blocked_on->lock;

	/* Self-deadlock: */
	if (current == task) {
		DEBUG_OFF();
		if (depth)
			return 1;
		printk("\n==========================================\n");
		printk(  "[ BUG: lock recursion deadlock detected! ]\n");
		printk(  "------------------------------------------\n");
		report_deadlock(task, lock, NULL, ip);
		return 0;
	}

	/* Ugh, something corrupted the lock data structure? */
	if (depth > 20) {
		DEBUG_OFF();
		printk("\n===========================================\n");
		printk(  "[ BUG: infinite lock dependency detected!? ]\n");
		printk(  "-------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}

	/* Recursively check for dependencies: */
	if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
		printk("\n============================================\n");
		printk(  "[ BUG: circular locking deadlock detected! ]\n");
		printk(  "--------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}
	return 0;
}
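
/*
 * Worked example (a hypothetical scenario, not code in this file):
 * suppose task A holds M2 and is blocked on M1, and the current task B,
 * which holds M1, now tries to acquire M2. The add-waiter path calls
 * check_deadlock(M2, 0, ...), which follows M2->owner (A) to
 * A->blocked_on->lock (M1) and recurses: check_deadlock(M1, 1, ...)
 * finds M1->owner == current at depth 1, returns 1, and the caller
 * prints the circular-deadlock banner. The depth > 20 cutoff guards
 * against walking a corrupted (cyclic) chain forever.
 */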

/*
 * Called when a task exits, this function checks whether the
 * task is holding any locks, and reports the first one if so:
 */
void mutex_debug_check_no_locks_held(struct task_struct *task)
{
	struct list_head *curr, *next;
	struct thread_info *t;
	unsigned long flags;
	struct mutex *lock;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (t != task->thread_info)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, lock held at task exit time!\n",
			task->comm, task->pid);
		printk_lock(lock, 1);
		if (lock->owner != task->thread_info)
			printk("exiting task is not even the owner??\n");
		return;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
}

/*
 * Called when kernel memory is freed (or unmapped), or if a mutex
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
{
	struct list_head *curr, *next;
	const void *to = from + len;
	unsigned long flags;
	struct mutex *lock;
	void *lock_addr;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		lock_addr = lock;
		if (lock_addr < from || lock_addr >= to)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
			current->comm, current->pid, lock, from, to);
		dump_stack();
		printk_lock(lock, 1);
		if (lock->owner != current_thread_info())
			printk("freeing task is not even the owner??\n");
		return;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
}

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_set_owner(struct mutex *lock,
			   struct thread_info *new_owner __IP_DECL__)
{
	lock->owner = new_owner;
	DEBUG_LOCKS_WARN_ON(!list_empty(&lock->held_list));
	if (debug_mutex_on) {
		list_add_tail(&lock->held_list, &debug_mutex_held_locks);
		lock->acquire_ip = ip;
	}
}
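
/*
 * Illustrative call (a sketch of the debug-build acquire path in
 * kernel/mutex.c, not code in this file): with lock->wait_lock held,
 * the new owner is recorded together with the caller's ip:
 *
 *	debug_mutex_set_owner(lock, current_thread_info() __IP__);
 *
 * where 'ip' was captured via __RET_IP__ at the mutex_lock() entry
 * point, so the "acquired at:" printout names the real caller.
 */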

void debug_mutex_init_waiter(struct mutex_waiter *waiter)
{
	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
	waiter->magic = waiter;
	INIT_LIST_HEAD(&waiter->list);
}

void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
	DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}

void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}

void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct thread_info *ti __IP_DECL__)
{
	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
	check_deadlock(lock, 0, ti, ip);
	/* Mark the current thread as blocked on the lock: */
	ti->task->blocked_on = waiter;
	waiter->lock = lock;
}

void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			 struct thread_info *ti)
{
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
	DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
	DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
	ti->task->blocked_on = NULL;

	list_del_init(&waiter->list);
	waiter->task = NULL;
}

void debug_mutex_unlock(struct mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
	if (debug_mutex_on) {
		DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
		list_del_init(&lock->held_list);
	}
}

void debug_mutex_init(struct mutex *lock, const char *name)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lock->owner = NULL;
	INIT_LIST_HEAD(&lock->held_list);
	lock->name = name;
	lock->magic = lock;
}
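
/*
 * For illustration (a sketch of the setup in include/linux/mutex.h,
 * not code in this file): callers never invoke debug_mutex_init()
 * directly. mutex_init() stringifies the lock expression and the core
 * __mutex_init() forwards that name here:
 *
 *	struct mutex my_lock;
 *	mutex_init(&my_lock);	expands roughly to __mutex_init(&my_lock, "&my_lock")
 *
 * which is why printk_lock() can print a human-readable {%s} name.
 */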

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void fastcall mutex_destroy(struct mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);
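
/*
 * Illustrative usage (a hypothetical driver teardown, not code in this
 * file): destroying the mutex before freeing its containing object lets
 * the magic checks catch any later use of stale memory:
 *
 *	struct my_dev *dev = ...;
 *	mutex_destroy(&dev->lock);
 *	kfree(dev);
 *
 * Any subsequent mutex operation on dev->lock then trips the
 * 'lock->magic != lock' warning in the debug checks above.
 */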