spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
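
/*
 * Report a suspicious spinlock state.  The static print_once flag is
 * cleared atomically with xchg(), so only the first bug to be hit
 * prints a report; later bugs stay silent rather than flooding the log
 * while the machine is already in trouble.
 */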
static void spin_bug(spinlock_t *lock, const char *msg)
{
        static long print_once = 1;
        struct task_struct *owner = NULL;

        if (xchg(&print_once, 0)) {
                if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                        owner = lock->owner;
                printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                        msg, raw_smp_processor_id(),
                        current->comm, current->pid);
                printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
                                ".owner_cpu: %d\n",
                        lock, lock->magic,
                        owner ? owner->comm : "<none>",
                        owner ? owner->pid : -1,
                        lock->owner_cpu);
                dump_stack();
#ifdef CONFIG_SMP
                /*
                 * We cannot continue on SMP:
                 */
//              panic("bad locking");
#endif
        }
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
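
/*
 * Sanity checks performed before taking a spinlock: the magic value
 * must be intact, and neither the current task nor the current CPU may
 * already own the lock (either would mean a self-deadlocking recursive
 * acquisition).
 */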
static inline void debug_spin_lock_before(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
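
/*
 * Slow path for a contended lock: spin on the trylock primitive so a
 * lockup can be detected.  Each pass through the inner loop makes
 * loops_per_jiffy * HZ calls to __delay(1), which corresponds to
 * roughly one second of busy-waiting; if the lock still cannot be
 * taken after that, a lockup is suspected, reported once, and the
 * spinning continues.
 */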
static void __spin_lock_debug(spinlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        if (__raw_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
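
/*
 * Debug replacements for the raw spinlock operations.  With
 * DEBUG_SPINLOCK enabled, the spin_lock() family ends up in these
 * functions, which wrap the architecture's raw primitives with the
 * before/after checks above.
 */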
void _raw_spin_lock(spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}
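
/*
 * On UP a trylock can only fail if the lock was already left locked,
 * which would be a deadlock, so a trylock failure there is itself
 * reported as a bug.
 */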
int _raw_spin_trylock(spinlock_t *lock)
{
        int ret = __raw_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_spin_unlock(spinlock_t *lock)
{
        debug_spin_unlock(lock);
        __raw_spin_unlock(&lock->raw_lock);
}
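
/*
 * rwlock counterpart of spin_bug().  Note that, unlike the spinlock
 * version, the panic() on SMP is not commented out here, so a detected
 * rwlock bug brings an SMP kernel down.
 */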
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        static long print_once = 1;

        if (xchg(&print_once, 0)) {
                printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                        msg, raw_smp_processor_id(), current->comm,
                        current->pid, lock);
                dump_stack();
#ifdef CONFIG_SMP
                /*
                 * We cannot continue on SMP:
                 */
                panic("bad locking");
#endif
        }
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
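
/*
 * Same lockup-detecting spin loop as __spin_lock_debug(), but for the
 * read side of an rwlock.
 */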
static void __read_lock_debug(rwlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        if (__raw_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
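
/*
 * Read locks are shared, so no owner task or CPU is recorded for them;
 * only the magic value can be checked on the read path.
 */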
void _raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
                __read_lock_debug(lock);
}

int _raw_read_trylock(rwlock_t *lock)
{
        int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        __raw_read_unlock(&lock->raw_lock);
}
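
/*
 * Write locks are exclusive, so the write side tracks an owner task
 * and CPU exactly as a spinlock does, and performs the same recursion
 * and ownership checks.
 */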
static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
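
/*
 * Lockup-detecting spin loop for the write side, identical in
 * structure to __spin_lock_debug().
 */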
static void __write_lock_debug(rwlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        if (__raw_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
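
/*
 * Debug entry points for the write-lock operations, mirroring the
 * spinlock versions above.
 */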
void _raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
                __write_lock_debug(lock);
        debug_write_lock_after(lock);
}

int _raw_write_trylock(rwlock_t *lock)
{
        int ret = __raw_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        __raw_write_unlock(&lock->raw_lock);
}