/* lib/spinlock_debug.c */
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
  8. #include <linux/config.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/delay.h>
/*
 * Report a spinlock debug-check failure.
 *
 * Prints the failure reason, the CPU and task that tripped the check,
 * and the lock's recorded owner, then dumps the stack.  Only the first
 * failure is reported: xchg() on the static print_once flag atomically
 * claims the single report even if several CPUs fail at once.
 */
static void spin_bug(spinlock_t *lock, const char *msg)
{
	static long print_once = 1;
	struct task_struct *owner = NULL;

	if (xchg(&print_once, 0)) {
		/* Only trust ->owner when it holds a real task pointer. */
		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
			owner = lock->owner;
		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
			msg, raw_smp_processor_id(),
			current->comm, current->pid);
		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
				".owner_cpu: %d\n",
			lock, lock->magic,
			owner ? owner->comm : "<none>",
			owner ? owner->pid : -1,
			lock->owner_cpu);
		dump_stack();
#ifdef CONFIG_SMP
		/*
		 * We cannot continue on SMP:
		 */
		/*
		 * NOTE(review): the panic below is disabled here, but the
		 * equivalent panic in rwlock_bug() is active — confirm the
		 * difference is intentional.
		 */
//		panic("bad locking");
#endif
	}
}
  37. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
/*
 * Sanity-check @lock before trying to acquire it: the magic must be
 * intact, and neither the current task nor the current CPU may already
 * own it (re-acquiring would self-deadlock).  The check order fixes
 * which failure message is reported first.
 */
static inline void debug_spin_lock_before(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record ownership (this CPU and this task) immediately after @lock
 * has been acquired, so later checks can detect recursion and
 * wrong-owner unlocks.
 */
static inline void debug_spin_lock_after(spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Sanity-check @lock before releasing it: magic intact, actually held,
 * and held by the current task on the current CPU.  Then reset the
 * ownership info to its "unowned" state (SPINLOCK_OWNER_INIT / -1)
 * before the raw unlock happens.
 */
static inline void debug_spin_unlock(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  60. static void __spin_lock_debug(spinlock_t *lock)
  61. {
  62. int print_once = 1;
  63. u64 i;
  64. for (;;) {
  65. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  66. if (__raw_spin_trylock(&lock->raw_lock))
  67. return;
  68. __delay(1);
  69. }
  70. /* lockup suspected: */
  71. if (print_once) {
  72. print_once = 0;
  73. printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
  74. "%s/%d, %p\n",
  75. raw_smp_processor_id(), current->comm,
  76. current->pid, lock);
  77. dump_stack();
  78. }
  79. }
  80. }
/*
 * Debug spin_lock implementation: run the pre-acquire sanity checks,
 * take the raw lock (trylock fast path, watchdog spin loop slow path),
 * then record ownership.
 */
void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
/*
 * Debug spin_trylock implementation.  Returns the raw trylock result
 * (non-zero on success); ownership is recorded only when the lock was
 * actually taken.  On UP a trylock can only fail if the lock is
 * already held on this CPU — that is a bug, so report it.
 */
int _raw_spin_trylock(spinlock_t *lock)
{
	int ret = __raw_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug spin_unlock implementation: validate and clear the ownership
 * info first, then release the raw lock.
 */
void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
/*
 * Report an rwlock debug-check failure.  Prints at most one report per
 * boot (xchg() on the static print_once flag atomically claims it) and
 * dumps the stack.  On SMP the system is halted via panic().
 * NOTE(review): spin_bug() has the equivalent panic commented out —
 * confirm the asymmetry is intentional.
 */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	static long print_once = 1;

	if (xchg(&print_once, 0)) {
		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
			msg, raw_smp_processor_id(), current->comm,
			current->pid, lock);
		dump_stack();
#ifdef CONFIG_SMP
		/*
		 * We cannot continue on SMP:
		 */
		panic("bad locking");
#endif
	}
}
  122. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  123. static void __read_lock_debug(rwlock_t *lock)
  124. {
  125. int print_once = 1;
  126. u64 i;
  127. for (;;) {
  128. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  129. if (__raw_read_trylock(&lock->raw_lock))
  130. return;
  131. __delay(1);
  132. }
  133. /* lockup suspected: */
  134. if (print_once) {
  135. print_once = 0;
  136. printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
  137. "%s/%d, %p\n",
  138. raw_smp_processor_id(), current->comm,
  139. current->pid, lock);
  140. dump_stack();
  141. }
  142. }
  143. }
/*
 * Debug read_lock implementation: check the magic, then take the raw
 * read lock (trylock fast path, watchdog spin loop slow path).  No
 * owner/owner_cpu tracking is done for readers.
 */
void _raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
		__read_lock_debug(lock);
}
/*
 * Debug read_trylock implementation.  Returns the raw trylock result
 * (non-zero on success).  On UP a read-trylock failure means the lock
 * is already write-held on this CPU — a bug, so report it.
 */
int _raw_read_trylock(rwlock_t *lock)
{
	int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug read_unlock implementation: check the magic, then drop the raw
 * read lock.  No owner checks — readers are not tracked individually.
 */
void _raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_unlock(&lock->raw_lock);
}
/*
 * Sanity-check @lock before trying to write-acquire it: the magic must
 * be intact, and neither the current task nor the current CPU may
 * already own it for writing (re-acquiring would self-deadlock).  The
 * check order fixes which failure message is reported first.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record write-ownership (this CPU and this task) immediately after
 * @lock has been write-acquired.
 */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Sanity-check @lock before write-releasing it: magic intact, held by
 * the current task on the current CPU.  Then reset the ownership info
 * to its "unowned" state (SPINLOCK_OWNER_INIT is reused as the rwlock
 * owner sentinel) before the raw unlock happens.
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  187. static void __write_lock_debug(rwlock_t *lock)
  188. {
  189. int print_once = 1;
  190. u64 i;
  191. for (;;) {
  192. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  193. if (__raw_write_trylock(&lock->raw_lock))
  194. return;
  195. __delay(1);
  196. }
  197. /* lockup suspected: */
  198. if (print_once) {
  199. print_once = 0;
  200. printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
  201. "%s/%d, %p\n",
  202. raw_smp_processor_id(), current->comm,
  203. current->pid, lock);
  204. dump_stack();
  205. }
  206. }
  207. }
/*
 * Debug write_lock implementation: run the pre-acquire sanity checks,
 * take the raw write lock (trylock fast path, watchdog spin loop slow
 * path), then record ownership.
 */
void _raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
		__write_lock_debug(lock);
	debug_write_lock_after(lock);
}
/*
 * Debug write_trylock implementation.  Returns the raw trylock result
 * (non-zero on success); ownership is recorded only when the lock was
 * actually taken.  On UP a write-trylock can only fail if the lock is
 * already held on this CPU — that is a bug, so report it.
 */
int _raw_write_trylock(rwlock_t *lock)
{
	int ret = __raw_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug write_unlock implementation: validate and clear the ownership
 * info first, then release the raw write lock.
 */
void _raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	__raw_write_unlock(&lock->raw_lock);
}