spinlock_debug.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259
  1. /*
  2. * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3. * Released under the General Public License (GPL).
  4. *
  5. * This file contains the spinlock/rwlock implementations for
  6. * DEBUG_SPINLOCK.
  7. */
  8. #include <linux/config.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/delay.h>
  12. static void spin_bug(spinlock_t *lock, const char *msg)
  13. {
  14. static long print_once = 1;
  15. struct task_struct *owner = NULL;
  16. if (xchg(&print_once, 0)) {
  17. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  18. owner = lock->owner;
  19. printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
  20. msg, raw_smp_processor_id(),
  21. current->comm, current->pid);
  22. printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
  23. lock, lock->magic,
  24. owner ? owner->comm : "<none>",
  25. owner ? owner->pid : -1,
  26. lock->owner_cpu);
  27. dump_stack();
  28. #ifdef CONFIG_SMP
  29. /*
  30. * We cannot continue on SMP:
  31. */
  32. // panic("bad locking");
  33. #endif
  34. }
  35. }
  36. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
/*
 * Sanity checks performed before attempting to take @lock: the magic
 * must match (catches uninitialized/corrupted locks), and neither the
 * current task nor the current CPU may already own it — taking a
 * non-recursive spinlock twice would self-deadlock.
 */
static inline void debug_spin_lock_before(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record ownership bookkeeping once @lock has actually been acquired,
 * so later recursion/wrong-owner checks can identify the holder.
 */
static inline void debug_spin_lock_after(spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Checks performed on unlock: the lock must be valid, actually held,
 * and held by this task on this CPU.  Ownership bookkeeping is then
 * reset to "nobody" before the raw unlock happens in the caller.
 */
static inline void debug_spin_unlock(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  59. static void __spin_lock_debug(spinlock_t *lock)
  60. {
  61. int print_once = 1;
  62. u64 i;
  63. for (;;) {
  64. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  65. cpu_relax();
  66. if (__raw_spin_trylock(&lock->raw_lock))
  67. return;
  68. }
  69. /* lockup suspected: */
  70. if (print_once) {
  71. print_once = 0;
  72. printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
  73. raw_smp_processor_id(), current->comm,
  74. current->pid, lock);
  75. dump_stack();
  76. }
  77. }
  78. }
/*
 * Debug-checked spinlock acquire.  Fast path is a single raw trylock;
 * on contention we fall into the watchdog spin loop that reports
 * suspected lockups.  Ownership is recorded only after the lock is
 * actually held.
 */
void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
/*
 * Debug-checked spinlock trylock.  Records ownership only on success.
 * Returns nonzero if the lock was acquired, 0 otherwise.
 */
int _raw_spin_trylock(spinlock_t *lock)
{
	int ret = __raw_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug-checked spinlock release: validate ownership and clear the
 * bookkeeping before dropping the raw lock.
 */
void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
  104. static void rwlock_bug(rwlock_t *lock, const char *msg)
  105. {
  106. static long print_once = 1;
  107. if (xchg(&print_once, 0)) {
  108. printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
  109. raw_smp_processor_id(), current->comm,
  110. current->pid, lock);
  111. dump_stack();
  112. #ifdef CONFIG_SMP
  113. /*
  114. * We cannot continue on SMP:
  115. */
  116. panic("bad locking");
  117. #endif
  118. }
  119. }
  120. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  121. static void __read_lock_debug(rwlock_t *lock)
  122. {
  123. int print_once = 1;
  124. u64 i;
  125. for (;;) {
  126. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  127. cpu_relax();
  128. if (__raw_read_trylock(&lock->raw_lock))
  129. return;
  130. }
  131. /* lockup suspected: */
  132. if (print_once) {
  133. print_once = 0;
  134. printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
  135. raw_smp_processor_id(), current->comm,
  136. current->pid, lock);
  137. dump_stack();
  138. }
  139. }
  140. }
/*
 * Debug-checked read-lock acquire.  Read locks are shared, so no
 * single-owner bookkeeping is recorded — only the magic is validated
 * before the trylock/spin path.
 */
void _raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
		__read_lock_debug(lock);
}
/*
 * Debug-checked read-lock trylock.  Returns nonzero if the read lock
 * was acquired, 0 otherwise.
 */
int _raw_read_trylock(rwlock_t *lock)
{
	int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug-checked read-lock release: validate the magic, then drop the
 * raw lock.  No owner bookkeeping — readers are anonymous.
 */
void _raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_unlock(&lock->raw_lock);
}
/*
 * Sanity checks performed before attempting to take the write lock:
 * magic must match, and neither this task nor this CPU may already
 * hold it for writing — re-acquiring would self-deadlock.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/*
 * Record ownership bookkeeping once the write lock is actually held
 * (write locks, unlike read locks, have a single owner).
 */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
/*
 * Checks performed on write-unlock: the lock must be valid and held
 * by this task on this CPU.  Ownership bookkeeping is reset to
 * "nobody" before the raw unlock happens in the caller.
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
  184. static void __write_lock_debug(rwlock_t *lock)
  185. {
  186. int print_once = 1;
  187. u64 i;
  188. for (;;) {
  189. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  190. cpu_relax();
  191. if (__raw_write_trylock(&lock->raw_lock))
  192. return;
  193. }
  194. /* lockup suspected: */
  195. if (print_once) {
  196. print_once = 0;
  197. printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
  198. raw_smp_processor_id(), current->comm,
  199. current->pid, lock);
  200. dump_stack();
  201. }
  202. }
  203. }
/*
 * Debug-checked write-lock acquire.  Fast path is a single raw
 * trylock; on contention we fall into the watchdog spin loop.
 * Ownership is recorded only after the lock is actually held.
 */
void _raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
		__write_lock_debug(lock);
	debug_write_lock_after(lock);
}
/*
 * Debug-checked write-lock trylock.  Records ownership only on
 * success.  Returns nonzero if the lock was acquired, 0 otherwise.
 */
int _raw_write_trylock(rwlock_t *lock)
{
	int ret = __raw_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/*
 * Debug-checked write-lock release: validate ownership and clear the
 * bookkeeping before dropping the raw lock.
 */
void _raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	__raw_write_unlock(&lock->raw_lock);
}