/* lib/spinlock_debug.c */
  1. /*
  2. * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3. * Released under the General Public License (GPL).
  4. *
  5. * This file contains the spinlock/rwlock implementations for
  6. * DEBUG_SPINLOCK.
  7. */
  8. #include <linux/spinlock.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/debug_locks.h>
  11. #include <linux/delay.h>
  12. #include <linux/module.h>
  13. void __spin_lock_init(spinlock_t *lock, const char *name,
  14. struct lock_class_key *key)
  15. {
  16. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  17. /*
  18. * Make sure we are not reinitializing a held lock:
  19. */
  20. debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  21. lockdep_init_map(&lock->dep_map, name, key);
  22. #endif
  23. lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  24. lock->magic = SPINLOCK_MAGIC;
  25. lock->owner = SPINLOCK_OWNER_INIT;
  26. lock->owner_cpu = -1;
  27. }
  28. EXPORT_SYMBOL(__spin_lock_init);
  29. void __rwlock_init(rwlock_t *lock, const char *name,
  30. struct lock_class_key *key)
  31. {
  32. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  33. /*
  34. * Make sure we are not reinitializing a held lock:
  35. */
  36. debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  37. lockdep_init_map(&lock->dep_map, name, key);
  38. #endif
  39. lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
  40. lock->magic = RWLOCK_MAGIC;
  41. lock->owner = SPINLOCK_OWNER_INIT;
  42. lock->owner_cpu = -1;
  43. }
  44. EXPORT_SYMBOL(__rwlock_init);
  45. static void spin_bug(spinlock_t *lock, const char *msg)
  46. {
  47. struct task_struct *owner = NULL;
  48. if (!debug_locks_off())
  49. return;
  50. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  51. owner = lock->owner;
  52. printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
  53. msg, raw_smp_processor_id(),
  54. current->comm, current->pid);
  55. printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
  56. ".owner_cpu: %d\n",
  57. lock, lock->magic,
  58. owner ? owner->comm : "<none>",
  59. owner ? owner->pid : -1,
  60. lock->owner_cpu);
  61. dump_stack();
  62. }
  63. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
  64. static inline void
  65. debug_spin_lock_before(spinlock_t *lock)
  66. {
  67. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  68. SPIN_BUG_ON(lock->owner == current, lock, "recursion");
  69. SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  70. lock, "cpu recursion");
  71. }
  72. static inline void debug_spin_lock_after(spinlock_t *lock)
  73. {
  74. lock->owner_cpu = raw_smp_processor_id();
  75. lock->owner = current;
  76. }
  77. static inline void debug_spin_unlock(spinlock_t *lock)
  78. {
  79. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  80. SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
  81. SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
  82. SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  83. lock, "wrong CPU");
  84. lock->owner = SPINLOCK_OWNER_INIT;
  85. lock->owner_cpu = -1;
  86. }
  87. static void __spin_lock_debug(spinlock_t *lock)
  88. {
  89. int print_once = 1;
  90. u64 i;
  91. for (;;) {
  92. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  93. if (__raw_spin_trylock(&lock->raw_lock))
  94. return;
  95. __delay(1);
  96. }
  97. /* lockup suspected: */
  98. if (print_once) {
  99. print_once = 0;
  100. printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
  101. "%s/%d, %p\n",
  102. raw_smp_processor_id(), current->comm,
  103. current->pid, lock);
  104. dump_stack();
  105. }
  106. }
  107. }
  108. void _raw_spin_lock(spinlock_t *lock)
  109. {
  110. debug_spin_lock_before(lock);
  111. if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
  112. __spin_lock_debug(lock);
  113. debug_spin_lock_after(lock);
  114. }
  115. int _raw_spin_trylock(spinlock_t *lock)
  116. {
  117. int ret = __raw_spin_trylock(&lock->raw_lock);
  118. if (ret)
  119. debug_spin_lock_after(lock);
  120. #ifndef CONFIG_SMP
  121. /*
  122. * Must not happen on UP:
  123. */
  124. SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
  125. #endif
  126. return ret;
  127. }
  128. void _raw_spin_unlock(spinlock_t *lock)
  129. {
  130. debug_spin_unlock(lock);
  131. __raw_spin_unlock(&lock->raw_lock);
  132. }
  133. static void rwlock_bug(rwlock_t *lock, const char *msg)
  134. {
  135. if (!debug_locks_off())
  136. return;
  137. printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
  138. msg, raw_smp_processor_id(), current->comm,
  139. current->pid, lock);
  140. dump_stack();
  141. }
  142. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  143. #if 0 /* __write_lock_debug() can lock up - maybe this can too? */
  144. static void __read_lock_debug(rwlock_t *lock)
  145. {
  146. int print_once = 1;
  147. u64 i;
  148. for (;;) {
  149. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  150. if (__raw_read_trylock(&lock->raw_lock))
  151. return;
  152. __delay(1);
  153. }
  154. /* lockup suspected: */
  155. if (print_once) {
  156. print_once = 0;
  157. printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
  158. "%s/%d, %p\n",
  159. raw_smp_processor_id(), current->comm,
  160. current->pid, lock);
  161. dump_stack();
  162. }
  163. }
  164. }
  165. #endif
  166. void _raw_read_lock(rwlock_t *lock)
  167. {
  168. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  169. __raw_read_lock(&lock->raw_lock);
  170. }
  171. int _raw_read_trylock(rwlock_t *lock)
  172. {
  173. int ret = __raw_read_trylock(&lock->raw_lock);
  174. #ifndef CONFIG_SMP
  175. /*
  176. * Must not happen on UP:
  177. */
  178. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  179. #endif
  180. return ret;
  181. }
  182. void _raw_read_unlock(rwlock_t *lock)
  183. {
  184. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  185. __raw_read_unlock(&lock->raw_lock);
  186. }
  187. static inline void debug_write_lock_before(rwlock_t *lock)
  188. {
  189. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  190. RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
  191. RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  192. lock, "cpu recursion");
  193. }
  194. static inline void debug_write_lock_after(rwlock_t *lock)
  195. {
  196. lock->owner_cpu = raw_smp_processor_id();
  197. lock->owner = current;
  198. }
  199. static inline void debug_write_unlock(rwlock_t *lock)
  200. {
  201. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  202. RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
  203. RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  204. lock, "wrong CPU");
  205. lock->owner = SPINLOCK_OWNER_INIT;
  206. lock->owner_cpu = -1;
  207. }
  208. #if 0 /* This can cause lockups */
  209. static void __write_lock_debug(rwlock_t *lock)
  210. {
  211. int print_once = 1;
  212. u64 i;
  213. for (;;) {
  214. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  215. if (__raw_write_trylock(&lock->raw_lock))
  216. return;
  217. __delay(1);
  218. }
  219. /* lockup suspected: */
  220. if (print_once) {
  221. print_once = 0;
  222. printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
  223. "%s/%d, %p\n",
  224. raw_smp_processor_id(), current->comm,
  225. current->pid, lock);
  226. dump_stack();
  227. }
  228. }
  229. }
  230. #endif
  231. void _raw_write_lock(rwlock_t *lock)
  232. {
  233. debug_write_lock_before(lock);
  234. __raw_write_lock(&lock->raw_lock);
  235. debug_write_lock_after(lock);
  236. }
  237. int _raw_write_trylock(rwlock_t *lock)
  238. {
  239. int ret = __raw_write_trylock(&lock->raw_lock);
  240. if (ret)
  241. debug_write_lock_after(lock);
  242. #ifndef CONFIG_SMP
  243. /*
  244. * Must not happen on UP:
  245. */
  246. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  247. #endif
  248. return ret;
  249. }
  250. void _raw_write_unlock(rwlock_t *lock)
  251. {
  252. debug_write_unlock(lock);
  253. __raw_write_unlock(&lock->raw_lock);
  254. }