/*
 * asm-alpha/spinlock.h -- Alpha SMP spinlock / rwlock primitives.
 */
#ifndef _ALPHA_SPINLOCK_H
#define _ALPHA_SPINLOCK_H

#include <linux/config.h>
#include <asm/system.h>
#include <linux/kernel.h>
#include <asm/current.h>

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
  13. typedef struct {
  14. volatile unsigned int lock;
  15. #ifdef CONFIG_DEBUG_SPINLOCK
  16. int on_cpu;
  17. int line_no;
  18. void *previous;
  19. struct task_struct * task;
  20. const char *base_file;
  21. #endif
  22. } spinlock_t;
  23. #ifdef CONFIG_DEBUG_SPINLOCK
  24. #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
  25. #else
  26. #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 }
  27. #endif
  28. #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
  29. #define spin_is_locked(x) ((x)->lock != 0)
  30. #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
  31. #ifdef CONFIG_DEBUG_SPINLOCK
  32. extern void _raw_spin_unlock(spinlock_t * lock);
  33. extern void debug_spin_lock(spinlock_t * lock, const char *, int);
  34. extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
  35. #define _raw_spin_lock(LOCK) \
  36. debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
  37. #define _raw_spin_trylock(LOCK) \
  38. debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
  39. #else
  40. static inline void _raw_spin_unlock(spinlock_t * lock)
  41. {
  42. mb();
  43. lock->lock = 0;
  44. }
  45. static inline void _raw_spin_lock(spinlock_t * lock)
  46. {
  47. long tmp;
  48. __asm__ __volatile__(
  49. "1: ldl_l %0,%1\n"
  50. " bne %0,2f\n"
  51. " lda %0,1\n"
  52. " stl_c %0,%1\n"
  53. " beq %0,2f\n"
  54. " mb\n"
  55. ".subsection 2\n"
  56. "2: ldl %0,%1\n"
  57. " bne %0,2b\n"
  58. " br 1b\n"
  59. ".previous"
  60. : "=&r" (tmp), "=m" (lock->lock)
  61. : "m"(lock->lock) : "memory");
  62. }
  63. static inline int _raw_spin_trylock(spinlock_t *lock)
  64. {
  65. return !test_and_set_bit(0, &lock->lock);
  66. }
  67. #endif /* CONFIG_DEBUG_SPINLOCK */
  68. #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
  69. /***********************************************************/
  70. typedef struct {
  71. volatile unsigned int lock;
  72. } rwlock_t;
  73. #define RW_LOCK_UNLOCKED (rwlock_t){ 0 }
  74. #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
  75. static inline int read_can_lock(rwlock_t *lock)
  76. {
  77. return (lock->lock & 1) == 0;
  78. }
  79. static inline int write_can_lock(rwlock_t *lock)
  80. {
  81. return lock->lock == 0;
  82. }
  83. #ifdef CONFIG_DEBUG_RWLOCK
  84. extern void _raw_write_lock(rwlock_t * lock);
  85. extern void _raw_read_lock(rwlock_t * lock);
  86. #else
  87. static inline void _raw_write_lock(rwlock_t * lock)
  88. {
  89. long regx;
  90. __asm__ __volatile__(
  91. "1: ldl_l %1,%0\n"
  92. " bne %1,6f\n"
  93. " lda %1,1\n"
  94. " stl_c %1,%0\n"
  95. " beq %1,6f\n"
  96. " mb\n"
  97. ".subsection 2\n"
  98. "6: ldl %1,%0\n"
  99. " bne %1,6b\n"
  100. " br 1b\n"
  101. ".previous"
  102. : "=m" (*lock), "=&r" (regx)
  103. : "m" (*lock) : "memory");
  104. }
  105. static inline void _raw_read_lock(rwlock_t * lock)
  106. {
  107. long regx;
  108. __asm__ __volatile__(
  109. "1: ldl_l %1,%0\n"
  110. " blbs %1,6f\n"
  111. " subl %1,2,%1\n"
  112. " stl_c %1,%0\n"
  113. " beq %1,6f\n"
  114. " mb\n"
  115. ".subsection 2\n"
  116. "6: ldl %1,%0\n"
  117. " blbs %1,6b\n"
  118. " br 1b\n"
  119. ".previous"
  120. : "=m" (*lock), "=&r" (regx)
  121. : "m" (*lock) : "memory");
  122. }
  123. #endif /* CONFIG_DEBUG_RWLOCK */
  124. static inline int _raw_read_trylock(rwlock_t * lock)
  125. {
  126. long regx;
  127. int success;
  128. __asm__ __volatile__(
  129. "1: ldl_l %1,%0\n"
  130. " lda %2,0\n"
  131. " blbs %1,2f\n"
  132. " subl %1,2,%2\n"
  133. " stl_c %2,%0\n"
  134. " beq %2,6f\n"
  135. "2: mb\n"
  136. ".subsection 2\n"
  137. "6: br 1b\n"
  138. ".previous"
  139. : "=m" (*lock), "=&r" (regx), "=&r" (success)
  140. : "m" (*lock) : "memory");
  141. return success;
  142. }
  143. static inline int _raw_write_trylock(rwlock_t * lock)
  144. {
  145. long regx;
  146. int success;
  147. __asm__ __volatile__(
  148. "1: ldl_l %1,%0\n"
  149. " lda %2,0\n"
  150. " bne %1,2f\n"
  151. " lda %2,1\n"
  152. " stl_c %2,%0\n"
  153. " beq %2,6f\n"
  154. "2: mb\n"
  155. ".subsection 2\n"
  156. "6: br 1b\n"
  157. ".previous"
  158. : "=m" (*lock), "=&r" (regx), "=&r" (success)
  159. : "m" (*lock) : "memory");
  160. return success;
  161. }
  162. static inline void _raw_write_unlock(rwlock_t * lock)
  163. {
  164. mb();
  165. lock->lock = 0;
  166. }
  167. static inline void _raw_read_unlock(rwlock_t * lock)
  168. {
  169. long regx;
  170. __asm__ __volatile__(
  171. " mb\n"
  172. "1: ldl_l %1,%0\n"
  173. " addl %1,2,%1\n"
  174. " stl_c %1,%0\n"
  175. " beq %1,6f\n"
  176. ".subsection 2\n"
  177. "6: br 1b\n"
  178. ".previous"
  179. : "=m" (*lock), "=&r" (regx)
  180. : "m" (*lock) : "memory");
  181. }
  182. #endif /* _ALPHA_SPINLOCK_H */