spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
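
/*
 * Initialize a spinlock for debugging: set the magic value, clear the
 * owner-tracking fields, and (with CONFIG_DEBUG_LOCK_ALLOC) register the
 * lock with lockdep.
 */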
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
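
/*
 * Report a misused spinlock: print the reporting CPU and task, the lock's
 * magic value, its recorded owner and owner CPU, then dump a stack trace.
 * spin_bug() reports only while lock debugging is still enabled;
 * debug_locks_off() disables further checks so one failure doesn't
 * cascade into a flood of reports.
 */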
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
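
/*
 * Consistency checks around lock and unlock: before acquiring, verify the
 * magic value and catch a task or CPU trying to take a lock it already
 * holds; after acquiring, record the owner; on unlock, verify the caller
 * actually owns the lock on this CPU before clearing the owner fields.
 */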
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
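
/*
 * Slow path for a contended lock: spin with trylock for roughly one
 * second's worth of __delay(1) iterations (loops_per_jiffy * HZ). If the
 * lock still hasn't been acquired, report a suspected lockup once,
 * trigger backtraces on all CPUs on SMP, and keep spinning.
 */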
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
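
/*
 * Debug variants of the raw spinlock entry points: run the pre-acquire
 * checks, take the lock (falling back to the lockup-detecting slow path
 * on contention), and record ownership once the lock is held.
 */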
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
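
/*
 * rwlock counterpart of spin_bug(): report a misused rwlock once and
 * dump a stack trace.
 */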
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
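
/*
 * Read locks can be held by multiple readers at once, so no single owner
 * is tracked; only the magic value is checked on lock and unlock.
 */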
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
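
/*
 * Write locks are exclusive, so they get the same owner tracking and
 * recursion checks as spinlocks.
 */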
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
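
/*
 * Debug variants of the write lock entry points: check before acquiring,
 * record ownership after, and verify ownership on unlock. The
 * lockup-detecting slow path above is compiled out, so
 * do_raw_write_lock() spins in the architecture code directly.
 */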
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}
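
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * the kind of bug these checks catch. Taking the same raw spinlock twice
 * on one CPU trips the "recursion" / "cpu recursion" SPIN_BUG_ON() checks
 * in debug_spin_lock_before() before the second acquisition deadlocks.
 *
 *	static DEFINE_RAW_SPINLOCK(example_lock);
 *
 *	static void example_recursive_lock(void)
 *	{
 *		raw_spin_lock(&example_lock);
 *		raw_spin_lock(&example_lock);	// BUG: spinlock recursion
 *	}
 */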