/* lib/spinlock_debug.c */
  1. /*
  2. * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3. * Released under the General Public License (GPL).
  4. *
  5. * This file contains the spinlock/rwlock implementations for
  6. * DEBUG_SPINLOCK.
  7. */
  8. #include <linux/config.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/delay.h>
/*
 * Report a spinlock API violation (bad magic, recursion, wrong owner, ...).
 * Only the very first violation (system-wide) is reported: the atomic
 * xchg() on the static print_once flag lets exactly one caller through,
 * so a single bug cannot flood the log from every CPU.
 */
static void spin_bug(spinlock_t *lock, const char *msg)
{
	static long print_once = 1;
	struct task_struct *owner = NULL;

	if (xchg(&print_once, 0)) {
		/* Only report ->owner if one was actually recorded: */
		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
			owner = lock->owner;
		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
			msg, smp_processor_id(), current->comm, current->pid);
		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
			lock, lock->magic,
			owner ? owner->comm : "<none>",
			owner ? owner->pid : -1,
			lock->owner_cpu);
		dump_stack();
#ifdef CONFIG_SMP
		/*
		 * We cannot continue on SMP:
		 */
//		panic("bad locking");
#endif
	}
}
  35. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
/*
 * Sanity checks performed before acquiring a spinlock: the lock must
 * be a validly initialized spinlock, and must not already be held by
 * the current task or by this CPU -- either case would deadlock.
 */
static inline void debug_spin_lock_before(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record ownership (task + CPU); called immediately after the raw
 * lock has been acquired.
 */
static inline void debug_spin_lock_after(spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Sanity checks performed before releasing a spinlock: it must be a
 * valid spinlock, actually locked, and held by the current task on
 * the current CPU.  Ownership info is then reset to the "unowned"
 * state (owner_cpu == -1, owner == SPINLOCK_OWNER_INIT).
 */
static inline void debug_spin_unlock(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  58. static void __spin_lock_debug(spinlock_t *lock)
  59. {
  60. int print_once = 1;
  61. u64 i;
  62. for (;;) {
  63. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  64. cpu_relax();
  65. if (__raw_spin_trylock(&lock->raw_lock))
  66. return;
  67. }
  68. /* lockup suspected: */
  69. if (print_once) {
  70. print_once = 0;
  71. printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
  72. smp_processor_id(), current->comm, current->pid,
  73. lock);
  74. dump_stack();
  75. }
  76. }
  77. }
/*
 * Debug version of spin_lock(): check invariants, try the raw lock,
 * fall back to the lockup-detecting spin loop on contention, then
 * record the new owner.
 */
void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
/*
 * Debug version of spin_trylock().  Ownership is recorded only on
 * success.  On UP no other CPU can be holding the lock, so a trylock
 * failure there indicates a locking bug.
 *
 * Returns nonzero if the lock was acquired, 0 otherwise.
 */
int _raw_spin_trylock(spinlock_t *lock)
{
	int ret = __raw_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug version of spin_unlock(): validate and clear ownership info
 * before dropping the raw lock, so stale owner data is never visible
 * on an unlocked lock.
 */
void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
/*
 * Report an rwlock API violation.  As with spin_bug(), the xchg() on
 * the static print_once flag ensures only the first violation is
 * reported.  NOTE: unlike spin_bug() (where the panic is commented
 * out), this variant does panic on SMP.
 */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	static long print_once = 1;

	if (xchg(&print_once, 0)) {
		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
			smp_processor_id(), current->comm, current->pid, lock);
		dump_stack();
#ifdef CONFIG_SMP
		/*
		 * We cannot continue on SMP:
		 */
		panic("bad locking");
#endif
	}
}
  118. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  119. static void __read_lock_debug(rwlock_t *lock)
  120. {
  121. int print_once = 1;
  122. u64 i;
  123. for (;;) {
  124. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  125. cpu_relax();
  126. if (__raw_read_trylock(&lock->raw_lock))
  127. return;
  128. }
  129. /* lockup suspected: */
  130. if (print_once) {
  131. print_once = 0;
  132. printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
  133. smp_processor_id(), current->comm, current->pid,
  134. lock);
  135. dump_stack();
  136. }
  137. }
  138. }
/*
 * Debug version of read_lock().  Only the magic is checked: no owner
 * bookkeeping is done for read locks, presumably because they may be
 * held by multiple readers at once -- note the asymmetry with the
 * write-side paths below.
 */
void _raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
		__read_lock_debug(lock);
}
/*
 * Debug version of read_trylock().  On UP no other CPU can hold the
 * lock, so a trylock failure there indicates a bug.
 *
 * Returns nonzero if the read lock was acquired, 0 otherwise.
 */
int _raw_read_trylock(rwlock_t *lock)
{
	int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug version of read_unlock(): validate the magic (no owner info
 * is kept for readers), then drop the raw read lock.
 */
void _raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_unlock(&lock->raw_lock);
}
/*
 * Sanity checks before taking an rwlock for writing: the lock must be
 * validly initialized and not already write-held by the current task
 * or this CPU (either would deadlock).
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record write ownership (task + CPU); called immediately after the
 * raw write lock has been acquired.
 */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Sanity checks before releasing a write lock: valid magic, held by
 * the current task on this CPU.  Ownership is then reset to the
 * "unowned" state (SPINLOCK_OWNER_INIT is reused as the rwlock
 * owner sentinel too).
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  182. static void __write_lock_debug(rwlock_t *lock)
  183. {
  184. int print_once = 1;
  185. u64 i;
  186. for (;;) {
  187. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  188. cpu_relax();
  189. if (__raw_write_trylock(&lock->raw_lock))
  190. return;
  191. }
  192. /* lockup suspected: */
  193. if (print_once) {
  194. print_once = 0;
  195. printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
  196. smp_processor_id(), current->comm, current->pid,
  197. lock);
  198. dump_stack();
  199. }
  200. }
  201. }
/*
 * Debug version of write_lock(): check invariants, try the raw lock,
 * fall back to the lockup-detecting spin loop on contention, then
 * record the new owner.
 */
void _raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
		__write_lock_debug(lock);
	debug_write_lock_after(lock);
}
/*
 * Debug version of write_trylock().  Ownership is recorded only on
 * success.  On UP no other CPU can hold the lock, so a trylock
 * failure there indicates a bug.
 *
 * Returns nonzero if the write lock was acquired, 0 otherwise.
 */
int _raw_write_trylock(rwlock_t *lock)
{
	int ret = __raw_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug version of write_unlock(): validate and clear ownership info
 * before dropping the raw write lock.
 */
void _raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	__raw_write_unlock(&lock->raw_lock);
}