spinlock_debug.c
  1. /*
  2. * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3. * Released under the General Public License (GPL).
  4. *
  5. * This file contains the spinlock/rwlock implementations for
  6. * DEBUG_SPINLOCK.
  7. */
  8. #include <linux/spinlock.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/debug_locks.h>
  11. #include <linux/delay.h>
  12. #include <linux/module.h>
  13. void __spin_lock_init(spinlock_t *lock, const char *name,
  14. struct lock_class_key *key)
  15. {
  16. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  17. /*
  18. * Make sure we are not reinitializing a held lock:
  19. */
  20. debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  21. lockdep_init_map(&lock->dep_map, name, key);
  22. #endif
  23. lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  24. lock->magic = SPINLOCK_MAGIC;
  25. lock->owner = SPINLOCK_OWNER_INIT;
  26. lock->owner_cpu = -1;
  27. }
  28. EXPORT_SYMBOL(__spin_lock_init);
  29. void __rwlock_init(rwlock_t *lock, const char *name,
  30. struct lock_class_key *key)
  31. {
  32. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  33. /*
  34. * Make sure we are not reinitializing a held lock:
  35. */
  36. debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  37. lockdep_init_map(&lock->dep_map, name, key);
  38. #endif
  39. lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
  40. lock->magic = RWLOCK_MAGIC;
  41. lock->owner = SPINLOCK_OWNER_INIT;
  42. lock->owner_cpu = -1;
  43. }
  44. EXPORT_SYMBOL(__rwlock_init);
  45. static void spin_bug(spinlock_t *lock, const char *msg)
  46. {
  47. struct task_struct *owner = NULL;
  48. if (!debug_locks_off())
  49. return;
  50. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  51. owner = lock->owner;
  52. printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
  53. msg, raw_smp_processor_id(),
  54. current->comm, current->pid);
  55. printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
  56. ".owner_cpu: %d\n",
  57. lock, lock->magic,
  58. owner ? owner->comm : "<none>",
  59. owner ? owner->pid : -1,
  60. lock->owner_cpu);
  61. dump_stack();
  62. }
  63. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
  64. static inline void
  65. debug_spin_lock_before(spinlock_t *lock)
  66. {
  67. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  68. SPIN_BUG_ON(lock->owner == current, lock, "recursion");
  69. SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  70. lock, "cpu recursion");
  71. }
  72. static inline void debug_spin_lock_after(spinlock_t *lock)
  73. {
  74. lock->owner_cpu = raw_smp_processor_id();
  75. lock->owner = current;
  76. }
  77. static inline void debug_spin_unlock(spinlock_t *lock)
  78. {
  79. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  80. SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
  81. SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
  82. SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  83. lock, "wrong CPU");
  84. lock->owner = SPINLOCK_OWNER_INIT;
  85. lock->owner_cpu = -1;
  86. }
  87. static void __spin_lock_debug(spinlock_t *lock)
  88. {
  89. u64 i;
  90. u64 loops = loops_per_jiffy * HZ;
  91. int print_once = 1;
  92. for (;;) {
  93. for (i = 0; i < loops; i++) {
  94. if (__raw_spin_trylock(&lock->raw_lock))
  95. return;
  96. __delay(1);
  97. }
  98. /* lockup suspected: */
  99. if (print_once) {
  100. print_once = 0;
  101. printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
  102. "%s/%d, %p\n",
  103. raw_smp_processor_id(), current->comm,
  104. current->pid, lock);
  105. dump_stack();
  106. }
  107. }
  108. }
  109. void _raw_spin_lock(spinlock_t *lock)
  110. {
  111. debug_spin_lock_before(lock);
  112. if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
  113. __spin_lock_debug(lock);
  114. debug_spin_lock_after(lock);
  115. }
  116. int _raw_spin_trylock(spinlock_t *lock)
  117. {
  118. int ret = __raw_spin_trylock(&lock->raw_lock);
  119. if (ret)
  120. debug_spin_lock_after(lock);
  121. #ifndef CONFIG_SMP
  122. /*
  123. * Must not happen on UP:
  124. */
  125. SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
  126. #endif
  127. return ret;
  128. }
  129. void _raw_spin_unlock(spinlock_t *lock)
  130. {
  131. debug_spin_unlock(lock);
  132. __raw_spin_unlock(&lock->raw_lock);
  133. }
  134. static void rwlock_bug(rwlock_t *lock, const char *msg)
  135. {
  136. if (!debug_locks_off())
  137. return;
  138. printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
  139. msg, raw_smp_processor_id(), current->comm,
  140. current->pid, lock);
  141. dump_stack();
  142. }
  143. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  144. #if 0 /* __write_lock_debug() can lock up - maybe this can too? */
  145. static void __read_lock_debug(rwlock_t *lock)
  146. {
  147. u64 i;
  148. u64 loops = loops_per_jiffy * HZ;
  149. int print_once = 1;
  150. for (;;) {
  151. for (i = 0; i < loops; i++) {
  152. if (__raw_read_trylock(&lock->raw_lock))
  153. return;
  154. __delay(1);
  155. }
  156. /* lockup suspected: */
  157. if (print_once) {
  158. print_once = 0;
  159. printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
  160. "%s/%d, %p\n",
  161. raw_smp_processor_id(), current->comm,
  162. current->pid, lock);
  163. dump_stack();
  164. }
  165. }
  166. }
  167. #endif
  168. void _raw_read_lock(rwlock_t *lock)
  169. {
  170. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  171. __raw_read_lock(&lock->raw_lock);
  172. }
  173. int _raw_read_trylock(rwlock_t *lock)
  174. {
  175. int ret = __raw_read_trylock(&lock->raw_lock);
  176. #ifndef CONFIG_SMP
  177. /*
  178. * Must not happen on UP:
  179. */
  180. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  181. #endif
  182. return ret;
  183. }
  184. void _raw_read_unlock(rwlock_t *lock)
  185. {
  186. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  187. __raw_read_unlock(&lock->raw_lock);
  188. }
  189. static inline void debug_write_lock_before(rwlock_t *lock)
  190. {
  191. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  192. RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
  193. RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  194. lock, "cpu recursion");
  195. }
  196. static inline void debug_write_lock_after(rwlock_t *lock)
  197. {
  198. lock->owner_cpu = raw_smp_processor_id();
  199. lock->owner = current;
  200. }
  201. static inline void debug_write_unlock(rwlock_t *lock)
  202. {
  203. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  204. RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
  205. RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  206. lock, "wrong CPU");
  207. lock->owner = SPINLOCK_OWNER_INIT;
  208. lock->owner_cpu = -1;
  209. }
  210. #if 0 /* This can cause lockups */
  211. static void __write_lock_debug(rwlock_t *lock)
  212. {
  213. u64 i;
  214. u64 loops = loops_per_jiffy * HZ;
  215. int print_once = 1;
  216. for (;;) {
  217. for (i = 0; i < loops; i++) {
  218. if (__raw_write_trylock(&lock->raw_lock))
  219. return;
  220. __delay(1);
  221. }
  222. /* lockup suspected: */
  223. if (print_once) {
  224. print_once = 0;
  225. printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
  226. "%s/%d, %p\n",
  227. raw_smp_processor_id(), current->comm,
  228. current->pid, lock);
  229. dump_stack();
  230. }
  231. }
  232. }
  233. #endif
  234. void _raw_write_lock(rwlock_t *lock)
  235. {
  236. debug_write_lock_before(lock);
  237. __raw_write_lock(&lock->raw_lock);
  238. debug_write_lock_after(lock);
  239. }
  240. int _raw_write_trylock(rwlock_t *lock)
  241. {
  242. int ret = __raw_write_trylock(&lock->raw_lock);
  243. if (ret)
  244. debug_write_lock_after(lock);
  245. #ifndef CONFIG_SMP
  246. /*
  247. * Must not happen on UP:
  248. */
  249. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  250. #endif
  251. return ret;
  252. }
  253. void _raw_write_unlock(rwlock_t *lock)
  254. {
  255. debug_write_unlock(lock);
  256. __raw_write_unlock(&lock->raw_lock);
  257. }