/*
 * spinlock.h — PowerPC spinlock and read/write lock primitives.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/*
 * Simple spin lock operations.
 */
typedef struct {
	volatile unsigned long lock;		/* 0 = free, nonzero = held */
#ifdef CONFIG_DEBUG_SPINLOCK
	volatile unsigned long owner_pc;	/* debug: where the lock was taken */
	volatile unsigned long owner_cpu;	/* debug: which CPU holds it */
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;		/* NOTE(review): presumably a contention
						 * flag used by the generic preempt
						 * lock-break code — confirm */
#endif
} spinlock_t;

#ifdef __KERNEL__
/* Extra initializers for the debug-only owner_pc/owner_cpu fields. */
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_DEBUG_INIT	, 0, 0
#else
#define SPINLOCK_DEBUG_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 SPINLOCK_DEBUG_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x)	((x)->lock != 0)
/* Busy-wait (without acquiring) until the lock is released. */
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
/* This arch ignores the saved flags when spinning. */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
#ifndef CONFIG_DEBUG_SPINLOCK

/*
 * Acquire the lock.
 *
 * Spins with plain loads (lwzx at 2:) while the lock word is nonzero,
 * then attempts the atomic acquire with a lwarx/stwcx. reservation
 * pair, storing 1.  If the store-conditional fails (reservation lost)
 * it goes back to the cheap read loop.  The trailing isync keeps the
 * critical section from being executed speculatively before the lock
 * is actually owned.  PPC405_ERR77 (from asm/system.h) presumably
 * inserts a workaround for the 405 core's stwcx. erratum — confirm.
 */
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"b	1f		# spin_lock\n\
2:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	2b\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock), "r"(1)
	: "cr0", "memory");
}

/*
 * Release the lock: eieio orders all the critical section's stores
 * before the plain store that clears the lock word.
 */
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("eieio		# spin_unlock": : :"memory");
	lock->lock = 0;
}

/* Nonzero on success: atomically sets bit 0, succeeds if it was clear. */
#define _raw_spin_trylock(l)	(!test_and_set_bit(0,&(l)->lock))

#else

/* Debug versions live out of line (not visible in this file). */
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern int _raw_spin_trylock(spinlock_t *lock);

#endif
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	/* Encoding: 0 = free, >0 = number of readers, -1 = write-locked. */
	volatile signed int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;	/* see spinlock_t.break_lock */
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

#define read_can_lock(rw)	((rw)->lock >= 0)	/* no writer present */
#define write_can_lock(rw)	(!(rw)->lock)		/* completely free */
#ifndef CONFIG_DEBUG_SPINLOCK

/*
 * Try to take a read lock: atomically increment the reader count with
 * lwarx/stwcx., bailing out (ble- 1f) without storing if the result
 * would be <= 0, i.e. a writer holds the lock (-1).  Returns nonzero
 * on success.  isync keeps the protected reads from starting before
 * the lock is held.
 */
static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return tmp > 0;
}
/*
 * Take a read lock (spins until no writer holds the lock).
 *
 * Waits with plain loads (lwzx at 1:) while the count is negative
 * (write-locked), then atomically increments the reader count with
 * lwarx/stwcx.; a failed store-conditional retries the reservation,
 * a non-positive result drops back to the cheap read loop.
 */
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# read_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	blt+	1b\n\
2:	lwarx	%0,0,%1\n\
	addic.	%0,%0,1\n\
	ble-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}
/*
 * Drop a read lock: atomically decrement the reader count.  The
 * leading eieio orders the critical section's accesses before the
 * count is released.
 */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx	%0,0,%1\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}
/*
 * Try to take the write lock: if the lock word is 0 (no readers, no
 * writer), atomically store -1 to claim exclusive ownership; otherwise
 * bail out without storing.  Returns nonzero on success.
 */
static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# write_trylock\n\
	cmpwi	0,%0,0\n\
	bne-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	return tmp == 0;
}
/*
 * Take the write lock (spins until the lock word is 0, i.e. no readers
 * and no writer).
 *
 * Waits with plain loads (lwzx at 1:) while the word is nonzero, then
 * atomically stores -1 with lwarx/stwcx. to claim exclusive ownership.
 * isync fences the critical section behind the acquire.
 */
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# write_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	1b\n\
2:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}
/*
 * Drop the write lock: eieio orders the critical section's stores
 * before the plain store that sets the word back to 0 (free).
 */
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("eieio		# write_unlock": : :"memory");
	rw->lock = 0;
}

#else

/* Debug versions live out of line (not visible in this file). */
extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);
extern void _raw_write_unlock(rwlock_t *rw);
extern int _raw_read_trylock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);

#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */