spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero. Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
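
/* The two primitives used throughout this file come from <asm/system.h>
 * (included above); the descriptions here are inferred from how they are
 * used below:
 *
 *	__ldcw_align(x)	- returns a pointer to the 16-byte-aligned lock
 *			  word inside the spinlock_t
 *	__ldcw(a)	- atomic "load and clear word": reads *a, sets it to 0,
 *			  and returns the old value (so a result of 0 means
 *			  the lock was already held)
 */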
#ifndef CONFIG_DEBUG_SPINLOCK

#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static inline int spin_is_locked(spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		/* Spin on a plain read until the lock word looks free,
		 * then retry the atomic load-and-clear. */
		while (*a == 0)
			;
	mb();
}

static inline void _raw_spin_unlock(spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int _raw_spin_trylock(spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

#define spin_lock_own(LOCK, LOCATION)	((void)0)
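
/* Illustrative use (hypothetical caller, not part of this header): code
 * normally goes through the generic wrappers in <linux/spinlock.h>, which
 * end up in the _raw_ operations above.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */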
#else /* !(CONFIG_DEBUG_SPINLOCK) */

#define SPINLOCK_MAGIC	0x1D244B3C

#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

#define CHECK_LOCK(x)							\
	do {								\
		if (unlikely((x)->magic != SPINLOCK_MAGIC)) {		\
			printk(KERN_ERR "%s:%d: spin_is_locked"		\
				" on uninitialized spinlock %p.\n",	\
				__FILE__, __LINE__, (x));		\
		}							\
	} while(0)

#define spin_is_locked(x)						\
	({								\
		CHECK_LOCK(x);						\
		volatile unsigned int *a = __ldcw_align(x);		\
		if (unlikely((*a == 0) && (x)->babble)) {		\
			(x)->babble--;					\
			printk(KERN_WARNING				\
				"%s:%d: spin_is_locked(%s/%p) already"	\
				" locked by %s:%d in %s at %p(%d)\n",	\
				__FILE__, __LINE__, (x)->module, (x),	\
				(x)->bfile, (x)->bline, (x)->task->comm,\
				(x)->previous, (x)->oncpu);		\
		}							\
		*a == 0;						\
	})

#define spin_unlock_wait(x)						\
	do {								\
		CHECK_LOCK(x);						\
		volatile unsigned int *a = __ldcw_align(x);		\
		if (unlikely((*a == 0) && (x)->babble)) {		\
			(x)->babble--;					\
			printk(KERN_WARNING				\
				"%s:%d: spin_unlock_wait(%s/%p)"	\
				" owned by %s:%d in %s at %p(%d)\n",	\
				__FILE__, __LINE__, (x)->module, (x),	\
				(x)->bfile, (x)->bline, (x)->task->comm,\
				(x)->previous, (x)->oncpu);		\
		}							\
		barrier();						\
	} while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)

extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
extern int _dbg_spin_trylock(spinlock_t *lock, const char *, int);

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#define _raw_spin_unlock(lock)	_dbg_spin_unlock(lock, __FILE__, __LINE__)
#define _raw_spin_lock(lock)	_dbg_spin_lock(lock, __FILE__, __LINE__)
#define _raw_spin_trylock(lock)	_dbg_spin_trylock(lock, __FILE__, __LINE__)

/* just in case we need it */
#define spin_lock_own(LOCK, LOCATION)					\
	do {								\
		volatile unsigned int *a = __ldcw_align(LOCK);		\
		if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
			printk(KERN_WARNING				\
				"%s: called on %d from %p but lock %s on %d\n", \
				LOCATION, smp_processor_id(),		\
				__builtin_return_address(0),		\
				(*a == 0) ? "taken" : "freed",		\
				(LOCK)->oncpu);				\
	} while (0)

#endif /* !(CONFIG_DEBUG_SPINLOCK) */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
typedef struct {
	spinlock_t lock;
	volatile int counter;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
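
/* Convention for 'counter' (see the accessors at the bottom of this file):
 *	 0	- lock is free
 *	>0	- number of readers currently holding the lock
 *	-1	- held by a writer
 */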
#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }

#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
/* read_lock and read_unlock are fairly straightforward.  Unfortunately we
 * end up saving/restoring flags twice for read_lock_irqsave and friends. */
#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_read_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);

	rw->counter++;

	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_RWLOCK */

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);

	rw->counter--;

	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
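
/* The reader paths disable local interrupts around the internal spinlock:
 * if an interrupt handler tried to take read_lock on the same rwlock while
 * we held rw->lock here, it would spin forever.  Writers (below) do not
 * bother, as the comment that follows explains. */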

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone messed up and we'd deadlock
 * sooner or later anyway.   prumpf */
#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
	_raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);

		while (rw->counter != 0)
			;

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}
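
/* Note that the wait on rw->counter above happens without holding rw->lock,
 * so new readers may have slipped in again by the time we retake it; hence
 * the retry loop rather than a single recheck. */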
#endif /* CONFIG_DEBUG_RWLOCK */

/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	rw->counter = 0;
	_raw_spin_unlock(&rw->lock);
}

#ifdef CONFIG_DEBUG_RWLOCK
extern int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
#else
static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	_raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);
		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}
#endif /* CONFIG_DEBUG_RWLOCK */

static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->counter < 0;
}
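
/* Both helpers above read rw->counter without taking the internal spinlock,
 * so they only report a snapshot; the answer may already be stale by the
 * time the caller acts on it. */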

#endif /* __ASM_SPINLOCK_H */