/* lib/spinlock_debug.c */
  1. /*
  2. * Copyright 2005, Red Hat, Inc., Ingo Molnar
  3. * Released under the General Public License (GPL).
  4. *
  5. * This file contains the spinlock/rwlock implementations for
  6. * DEBUG_SPINLOCK.
  7. */
  8. #include <linux/spinlock.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/debug_locks.h>
  11. #include <linux/delay.h>
  12. #include <linux/module.h>
  13. static void spin_bug(spinlock_t *lock, const char *msg)
  14. {
  15. struct task_struct *owner = NULL;
  16. if (!debug_locks_off())
  17. return;
  18. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  19. owner = lock->owner;
  20. printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
  21. msg, raw_smp_processor_id(),
  22. current->comm, current->pid);
  23. printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
  24. ".owner_cpu: %d\n",
  25. lock, lock->magic,
  26. owner ? owner->comm : "<none>",
  27. owner ? owner->pid : -1,
  28. lock->owner_cpu);
  29. dump_stack();
  30. }
  31. #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
  32. static inline void
  33. debug_spin_lock_before(spinlock_t *lock)
  34. {
  35. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  36. SPIN_BUG_ON(lock->owner == current, lock, "recursion");
  37. SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  38. lock, "cpu recursion");
  39. }
  40. static inline void debug_spin_lock_after(spinlock_t *lock)
  41. {
  42. lock->owner_cpu = raw_smp_processor_id();
  43. lock->owner = current;
  44. }
  45. static inline void debug_spin_unlock(spinlock_t *lock)
  46. {
  47. SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  48. SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
  49. SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
  50. SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  51. lock, "wrong CPU");
  52. lock->owner = SPINLOCK_OWNER_INIT;
  53. lock->owner_cpu = -1;
  54. }
  55. static void __spin_lock_debug(spinlock_t *lock)
  56. {
  57. int print_once = 1;
  58. u64 i;
  59. for (;;) {
  60. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  61. if (__raw_spin_trylock(&lock->raw_lock))
  62. return;
  63. __delay(1);
  64. }
  65. /* lockup suspected: */
  66. if (print_once) {
  67. print_once = 0;
  68. printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
  69. "%s/%d, %p\n",
  70. raw_smp_processor_id(), current->comm,
  71. current->pid, lock);
  72. dump_stack();
  73. }
  74. }
  75. }
  76. void _raw_spin_lock(spinlock_t *lock)
  77. {
  78. debug_spin_lock_before(lock);
  79. if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
  80. __spin_lock_debug(lock);
  81. debug_spin_lock_after(lock);
  82. }
  83. int _raw_spin_trylock(spinlock_t *lock)
  84. {
  85. int ret = __raw_spin_trylock(&lock->raw_lock);
  86. if (ret)
  87. debug_spin_lock_after(lock);
  88. #ifndef CONFIG_SMP
  89. /*
  90. * Must not happen on UP:
  91. */
  92. SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
  93. #endif
  94. return ret;
  95. }
  96. void _raw_spin_unlock(spinlock_t *lock)
  97. {
  98. debug_spin_unlock(lock);
  99. __raw_spin_unlock(&lock->raw_lock);
  100. }
  101. static void rwlock_bug(rwlock_t *lock, const char *msg)
  102. {
  103. if (!debug_locks_off())
  104. return;
  105. printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
  106. msg, raw_smp_processor_id(), current->comm,
  107. current->pid, lock);
  108. dump_stack();
  109. }
  110. #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  111. static void __read_lock_debug(rwlock_t *lock)
  112. {
  113. int print_once = 1;
  114. u64 i;
  115. for (;;) {
  116. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  117. if (__raw_read_trylock(&lock->raw_lock))
  118. return;
  119. __delay(1);
  120. }
  121. /* lockup suspected: */
  122. if (print_once) {
  123. print_once = 0;
  124. printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
  125. "%s/%d, %p\n",
  126. raw_smp_processor_id(), current->comm,
  127. current->pid, lock);
  128. dump_stack();
  129. }
  130. }
  131. }
  132. void _raw_read_lock(rwlock_t *lock)
  133. {
  134. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  135. if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
  136. __read_lock_debug(lock);
  137. }
  138. int _raw_read_trylock(rwlock_t *lock)
  139. {
  140. int ret = __raw_read_trylock(&lock->raw_lock);
  141. #ifndef CONFIG_SMP
  142. /*
  143. * Must not happen on UP:
  144. */
  145. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  146. #endif
  147. return ret;
  148. }
  149. void _raw_read_unlock(rwlock_t *lock)
  150. {
  151. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  152. __raw_read_unlock(&lock->raw_lock);
  153. }
  154. static inline void debug_write_lock_before(rwlock_t *lock)
  155. {
  156. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  157. RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
  158. RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  159. lock, "cpu recursion");
  160. }
  161. static inline void debug_write_lock_after(rwlock_t *lock)
  162. {
  163. lock->owner_cpu = raw_smp_processor_id();
  164. lock->owner = current;
  165. }
  166. static inline void debug_write_unlock(rwlock_t *lock)
  167. {
  168. RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  169. RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
  170. RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  171. lock, "wrong CPU");
  172. lock->owner = SPINLOCK_OWNER_INIT;
  173. lock->owner_cpu = -1;
  174. }
  175. static void __write_lock_debug(rwlock_t *lock)
  176. {
  177. int print_once = 1;
  178. u64 i;
  179. for (;;) {
  180. for (i = 0; i < loops_per_jiffy * HZ; i++) {
  181. if (__raw_write_trylock(&lock->raw_lock))
  182. return;
  183. __delay(1);
  184. }
  185. /* lockup suspected: */
  186. if (print_once) {
  187. print_once = 0;
  188. printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
  189. "%s/%d, %p\n",
  190. raw_smp_processor_id(), current->comm,
  191. current->pid, lock);
  192. dump_stack();
  193. }
  194. }
  195. }
  196. void _raw_write_lock(rwlock_t *lock)
  197. {
  198. debug_write_lock_before(lock);
  199. if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
  200. __write_lock_debug(lock);
  201. debug_write_lock_after(lock);
  202. }
  203. int _raw_write_trylock(rwlock_t *lock)
  204. {
  205. int ret = __raw_write_trylock(&lock->raw_lock);
  206. if (ret)
  207. debug_write_lock_after(lock);
  208. #ifndef CONFIG_SMP
  209. /*
  210. * Must not happen on UP:
  211. */
  212. RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  213. #endif
  214. return ret;
  215. }
  216. void _raw_write_unlock(rwlock_t *lock)
  217. {
  218. debug_write_unlock(lock);
  219. __raw_write_unlock(&lock->raw_lock);
  220. }