/*
 * RT-Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This code is based on the rt.c implementation in the preempt-rt tree.
 * Portions of said code are
 *
 *  Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Copyright (C) 2006 Esben Nielsen
 *  Copyright (C) 2006 Kihon Technologies Inc.,
 *			Steven Rostedt <rostedt@goodmis.org>
 *
 * See rt.c in preempt-rt for proper credits and further information
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/fs.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
  35. # define TRACE_WARN_ON(x) WARN_ON(x)
  36. # define TRACE_BUG_ON(x) BUG_ON(x)
  37. # define TRACE_OFF() \
  38. do { \
  39. if (rt_trace_on) { \
  40. rt_trace_on = 0; \
  41. console_verbose(); \
  42. if (spin_is_locked(&current->pi_lock)) \
  43. spin_unlock(&current->pi_lock); \
  44. if (spin_is_locked(&current->held_list_lock)) \
  45. spin_unlock(&current->held_list_lock); \
  46. } \
  47. } while (0)
  48. # define TRACE_OFF_NOLOCK() \
  49. do { \
  50. if (rt_trace_on) { \
  51. rt_trace_on = 0; \
  52. console_verbose(); \
  53. } \
  54. } while (0)
  55. # define TRACE_BUG_LOCKED() \
  56. do { \
  57. TRACE_OFF(); \
  58. BUG(); \
  59. } while (0)
  60. # define TRACE_WARN_ON_LOCKED(c) \
  61. do { \
  62. if (unlikely(c)) { \
  63. TRACE_OFF(); \
  64. WARN_ON(1); \
  65. } \
  66. } while (0)
  67. # define TRACE_BUG_ON_LOCKED(c) \
  68. do { \
  69. if (unlikely(c)) \
  70. TRACE_BUG_LOCKED(); \
  71. } while (0)
  72. #ifdef CONFIG_SMP
  73. # define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
  74. #else
  75. # define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
  76. #endif
  77. /*
  78. * deadlock detection flag. We turn it off when we detect
  79. * the first problem because we dont want to recurse back
  80. * into the tracing code when doing error printk or
  81. * executing a BUG():
  82. */
  83. int rt_trace_on = 1;
  84. void deadlock_trace_off(void)
  85. {
  86. rt_trace_on = 0;
  87. }
  88. static void printk_task(task_t *p)
  89. {
  90. if (p)
  91. printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
  92. else
  93. printk("<none>");
  94. }
  95. static void printk_task_short(task_t *p)
  96. {
  97. if (p)
  98. printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
  99. else
  100. printk("<none>");
  101. }
  102. static void printk_lock(struct rt_mutex *lock, int print_owner)
  103. {
  104. if (lock->name)
  105. printk(" [%p] {%s}\n",
  106. lock, lock->name);
  107. else
  108. printk(" [%p] {%s:%d}\n",
  109. lock, lock->file, lock->line);
  110. if (print_owner && rt_mutex_owner(lock)) {
  111. printk(".. ->owner: %p\n", lock->owner);
  112. printk(".. held by: ");
  113. printk_task(rt_mutex_owner(lock));
  114. printk("\n");
  115. }
  116. if (rt_mutex_owner(lock)) {
  117. printk("... acquired at: ");
  118. print_symbol("%s\n", lock->acquire_ip);
  119. }
  120. }
  121. static void printk_waiter(struct rt_mutex_waiter *w)
  122. {
  123. printk("-------------------------\n");
  124. printk("| waiter struct %p:\n", w);
  125. printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
  126. w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
  127. w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
  128. w->list_entry.prio);
  129. printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
  130. w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
  131. w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
  132. w->pi_list_entry.prio);
  133. printk("\n| lock:\n");
  134. printk_lock(w->lock, 1);
  135. printk("| w->ti->task:\n");
  136. printk_task(w->task);
  137. printk("| blocked at: ");
  138. print_symbol("%s\n", w->ip);
  139. printk("-------------------------\n");
  140. }
  141. static void show_task_locks(task_t *p)
  142. {
  143. switch (p->state) {
  144. case TASK_RUNNING: printk("R"); break;
  145. case TASK_INTERRUPTIBLE: printk("S"); break;
  146. case TASK_UNINTERRUPTIBLE: printk("D"); break;
  147. case TASK_STOPPED: printk("T"); break;
  148. case EXIT_ZOMBIE: printk("Z"); break;
  149. case EXIT_DEAD: printk("X"); break;
  150. default: printk("?"); break;
  151. }
  152. printk_task(p);
  153. if (p->pi_blocked_on) {
  154. struct rt_mutex *lock = p->pi_blocked_on->lock;
  155. printk(" blocked on:");
  156. printk_lock(lock, 1);
  157. } else
  158. printk(" (not blocked)\n");
  159. }
  160. void rt_mutex_show_held_locks(task_t *task, int verbose)
  161. {
  162. struct list_head *curr, *cursor = NULL;
  163. struct rt_mutex *lock;
  164. task_t *t;
  165. unsigned long flags;
  166. int count = 0;
  167. if (!rt_trace_on)
  168. return;
  169. if (verbose) {
  170. printk("------------------------------\n");
  171. printk("| showing all locks held by: | (");
  172. printk_task_short(task);
  173. printk("):\n");
  174. printk("------------------------------\n");
  175. }
  176. next:
  177. spin_lock_irqsave(&task->held_list_lock, flags);
  178. list_for_each(curr, &task->held_list_head) {
  179. if (cursor && curr != cursor)
  180. continue;
  181. lock = list_entry(curr, struct rt_mutex, held_list_entry);
  182. t = rt_mutex_owner(lock);
  183. WARN_ON(t != task);
  184. count++;
  185. cursor = curr->next;
  186. spin_unlock_irqrestore(&task->held_list_lock, flags);
  187. printk("\n#%03d: ", count);
  188. printk_lock(lock, 0);
  189. goto next;
  190. }
  191. spin_unlock_irqrestore(&task->held_list_lock, flags);
  192. printk("\n");
  193. }
  194. void rt_mutex_show_all_locks(void)
  195. {
  196. task_t *g, *p;
  197. int count = 10;
  198. int unlock = 1;
  199. printk("\n");
  200. printk("----------------------\n");
  201. printk("| showing all tasks: |\n");
  202. printk("----------------------\n");
  203. /*
  204. * Here we try to get the tasklist_lock as hard as possible,
  205. * if not successful after 2 seconds we ignore it (but keep
  206. * trying). This is to enable a debug printout even if a
  207. * tasklist_lock-holding task deadlocks or crashes.
  208. */
  209. retry:
  210. if (!read_trylock(&tasklist_lock)) {
  211. if (count == 10)
  212. printk("hm, tasklist_lock locked, retrying... ");
  213. if (count) {
  214. count--;
  215. printk(" #%d", 10-count);
  216. mdelay(200);
  217. goto retry;
  218. }
  219. printk(" ignoring it.\n");
  220. unlock = 0;
  221. }
  222. if (count != 10)
  223. printk(" locked it.\n");
  224. do_each_thread(g, p) {
  225. show_task_locks(p);
  226. if (!unlock)
  227. if (read_trylock(&tasklist_lock))
  228. unlock = 1;
  229. } while_each_thread(g, p);
  230. printk("\n");
  231. printk("-----------------------------------------\n");
  232. printk("| showing all locks held in the system: |\n");
  233. printk("-----------------------------------------\n");
  234. do_each_thread(g, p) {
  235. rt_mutex_show_held_locks(p, 0);
  236. if (!unlock)
  237. if (read_trylock(&tasklist_lock))
  238. unlock = 1;
  239. } while_each_thread(g, p);
  240. printk("=============================================\n\n");
  241. if (unlock)
  242. read_unlock(&tasklist_lock);
  243. }
  244. void rt_mutex_debug_check_no_locks_held(task_t *task)
  245. {
  246. struct rt_mutex_waiter *w;
  247. struct list_head *curr;
  248. struct rt_mutex *lock;
  249. if (!rt_trace_on)
  250. return;
  251. if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
  252. printk("BUG: PI priority boost leaked!\n");
  253. printk_task(task);
  254. printk("\n");
  255. }
  256. if (list_empty(&task->held_list_head))
  257. return;
  258. spin_lock(&task->pi_lock);
  259. plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
  260. TRACE_OFF();
  261. printk("hm, PI interest held at exit time? Task:\n");
  262. printk_task(task);
  263. printk_waiter(w);
  264. return;
  265. }
  266. spin_unlock(&task->pi_lock);
  267. list_for_each(curr, &task->held_list_head) {
  268. lock = list_entry(curr, struct rt_mutex, held_list_entry);
  269. printk("BUG: %s/%d, lock held at task exit time!\n",
  270. task->comm, task->pid);
  271. printk_lock(lock, 1);
  272. if (rt_mutex_owner(lock) != task)
  273. printk("exiting task is not even the owner??\n");
  274. }
  275. }
  276. int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
  277. {
  278. const void *to = from + len;
  279. struct list_head *curr;
  280. struct rt_mutex *lock;
  281. unsigned long flags;
  282. void *lock_addr;
  283. if (!rt_trace_on)
  284. return 0;
  285. spin_lock_irqsave(&current->held_list_lock, flags);
  286. list_for_each(curr, &current->held_list_head) {
  287. lock = list_entry(curr, struct rt_mutex, held_list_entry);
  288. lock_addr = lock;
  289. if (lock_addr < from || lock_addr >= to)
  290. continue;
  291. TRACE_OFF();
  292. printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
  293. current->comm, current->pid, lock, from, to);
  294. dump_stack();
  295. printk_lock(lock, 1);
  296. if (rt_mutex_owner(lock) != current)
  297. printk("freeing task is not even the owner??\n");
  298. return 1;
  299. }
  300. spin_unlock_irqrestore(&current->held_list_lock, flags);
  301. return 0;
  302. }
  303. void rt_mutex_debug_task_free(struct task_struct *task)
  304. {
  305. WARN_ON(!plist_head_empty(&task->pi_waiters));
  306. WARN_ON(task->pi_blocked_on);
  307. }
  308. /*
  309. * We fill out the fields in the waiter to store the information about
  310. * the deadlock. We print when we return. act_waiter can be NULL in
  311. * case of a remove waiter operation.
  312. */
  313. void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
  314. struct rt_mutex *lock)
  315. {
  316. struct task_struct *task;
  317. if (!rt_trace_on || detect || !act_waiter)
  318. return;
  319. task = rt_mutex_owner(act_waiter->lock);
  320. if (task && task != current) {
  321. act_waiter->deadlock_task_pid = task->pid;
  322. act_waiter->deadlock_lock = lock;
  323. }
  324. }
  325. void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
  326. {
  327. struct task_struct *task;
  328. if (!waiter->deadlock_lock || !rt_trace_on)
  329. return;
  330. task = find_task_by_pid(waiter->deadlock_task_pid);
  331. if (!task)
  332. return;
  333. TRACE_OFF_NOLOCK();
  334. printk("\n============================================\n");
  335. printk( "[ BUG: circular locking deadlock detected! ]\n");
  336. printk( "--------------------------------------------\n");
  337. printk("%s/%d is deadlocking current task %s/%d\n\n",
  338. task->comm, task->pid, current->comm, current->pid);
  339. printk("\n1) %s/%d is trying to acquire this lock:\n",
  340. current->comm, current->pid);
  341. printk_lock(waiter->lock, 1);
  342. printk("... trying at: ");
  343. print_symbol("%s\n", waiter->ip);
  344. printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
  345. printk_lock(waiter->deadlock_lock, 1);
  346. rt_mutex_show_held_locks(current, 1);
  347. rt_mutex_show_held_locks(task, 1);
  348. printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
  349. show_stack(task, NULL);
  350. printk("\n%s/%d's [current] stackdump:\n\n",
  351. current->comm, current->pid);
  352. dump_stack();
  353. rt_mutex_show_all_locks();
  354. printk("[ turning off deadlock detection."
  355. "Please report this trace. ]\n\n");
  356. local_irq_disable();
  357. }
  358. void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
  359. {
  360. unsigned long flags;
  361. if (rt_trace_on) {
  362. TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
  363. spin_lock_irqsave(&current->held_list_lock, flags);
  364. list_add_tail(&lock->held_list_entry, &current->held_list_head);
  365. spin_unlock_irqrestore(&current->held_list_lock, flags);
  366. lock->acquire_ip = ip;
  367. }
  368. }
  369. void debug_rt_mutex_unlock(struct rt_mutex *lock)
  370. {
  371. unsigned long flags;
  372. if (rt_trace_on) {
  373. TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
  374. TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
  375. spin_lock_irqsave(&current->held_list_lock, flags);
  376. list_del_init(&lock->held_list_entry);
  377. spin_unlock_irqrestore(&current->held_list_lock, flags);
  378. }
  379. }
  380. void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
  381. struct task_struct *powner __IP_DECL__)
  382. {
  383. unsigned long flags;
  384. if (rt_trace_on) {
  385. TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
  386. spin_lock_irqsave(&powner->held_list_lock, flags);
  387. list_add_tail(&lock->held_list_entry, &powner->held_list_head);
  388. spin_unlock_irqrestore(&powner->held_list_lock, flags);
  389. lock->acquire_ip = ip;
  390. }
  391. }
  392. void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
  393. {
  394. unsigned long flags;
  395. if (rt_trace_on) {
  396. struct task_struct *owner = rt_mutex_owner(lock);
  397. TRACE_WARN_ON_LOCKED(!owner);
  398. TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
  399. spin_lock_irqsave(&owner->held_list_lock, flags);
  400. list_del_init(&lock->held_list_entry);
  401. spin_unlock_irqrestore(&owner->held_list_lock, flags);
  402. }
  403. }
  404. void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
  405. {
  406. memset(waiter, 0x11, sizeof(*waiter));
  407. plist_node_init(&waiter->list_entry, MAX_PRIO);
  408. plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
  409. }
  410. void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
  411. {
  412. TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
  413. TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
  414. TRACE_WARN_ON(waiter->task);
  415. memset(waiter, 0x22, sizeof(*waiter));
  416. }
  417. void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
  418. {
  419. void *addr = lock;
  420. if (rt_trace_on) {
  421. rt_mutex_debug_check_no_locks_freed(addr,
  422. sizeof(struct rt_mutex));
  423. INIT_LIST_HEAD(&lock->held_list_entry);
  424. lock->name = name;
  425. }
  426. }
  427. void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
  428. {
  429. }
  430. void rt_mutex_deadlock_account_unlock(struct task_struct *task)
  431. {
  432. }