#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>
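
/*
 * A lock word of zero means unlocked, any non-zero value means held.  When
 * CONFIG_PREEMPT is enabled, break_lock lets the generic kernel locking code
 * note that another CPU is spinning on the lock so the holder can release it
 * early.
 */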
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED		(spinlock_t) { 0 }
#define spin_lock_init(x)		((x)->lock = 0)

#ifdef ASM_SUPPORTED

/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
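
/*
 * The lock address is pre-loaded into r31 and the caller's interrupt flags are
 * passed in r27; the out-of-line contention routine uses them to spin on the
 * lock word and, if interrupts were enabled, to re-enable them while waiting.
 */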
static inline void
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "mov r27=%2\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov r27=%2\n\t"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
}

#define _raw_spin_lock(lock)	_raw_spin_lock_flags(lock, 0)

/* Unlock by doing an ordered store and releasing the cacheline with nta */
static inline void _raw_spin_unlock(spinlock_t *x) {
	barrier();
	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
}

#else /* !ASM_SUPPORTED */

#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)
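
/*
 * C-only fallback for compilers without ia64 inline-asm support: try to take
 * the lock with a single cmpxchg4.acq; on failure, spin reading the lock word
 * until it looks free and then retry the cmpxchg.
 */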
# define _raw_spin_lock(x) \
do { \
	__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
	__u64 ia64_spinlock_val; \
	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
	if (unlikely(ia64_spinlock_val)) { \
		do { \
			while (*ia64_spinlock_ptr) \
				ia64_barrier(); \
			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
		} while (ia64_spinlock_val); \
	} \
} while (0)

#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)

#endif /* !ASM_SUPPORTED */

#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
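
/*
 * rwlock layout: readers are counted in the low 24 bits of the word, while a
 * writer claims the lock by setting the most significant bit (bit 31).  A
 * negative value of the 32-bit word therefore means a writer is present.
 */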
typedef struct {
	volatile unsigned int read_counter	: 24;
	volatile unsigned int write_lock	:  8;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
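
/*
 * Readers optimistically bump the counter with an acquire fetchadd.  If the
 * result is negative a writer holds the lock, so the increment is undone and
 * the reader spins until bit 31 clears before trying again.
 */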
#define _raw_read_lock(rw) \
do { \
	rwlock_t *__read_lock_ptr = (rw); \
 \
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
		while (*(volatile int *)__read_lock_ptr < 0) \
			cpu_relax(); \
	} \
} while (0)

#define _raw_read_unlock(rw) \
do { \
	rwlock_t *__read_lock_ptr = (rw); \
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)

#ifdef ASM_SUPPORTED
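
/*
 * Writers spin until the whole lock word (readers and writer alike) reads as
 * zero, then try to claim the lock by cmpxchg'ing in the writer bit (bit 31,
 * built with "dep r29 = -1, r0, 31, 1"); if the cmpxchg loses, start over.
 */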
#define _raw_write_lock(rw) \
do { \
	__asm__ __volatile__ ( \
		"mov ar.ccv = r0\n" \
		"dep r29 = -1, r0, 31, 1;;\n" \
		"1:\n" \
		"ld4 r2 = [%0];;\n" \
		"cmp4.eq p0,p7 = r0,r2\n" \
		"(p7) br.cond.spnt.few 1b \n" \
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
		"cmp4.eq p0,p7 = r0, r2\n" \
		"(p7) br.cond.spnt.few 1b;;\n" \
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
} while(0)

#define _raw_write_trylock(rw) \
({ \
	register long result; \
 \
	__asm__ __volatile__ ( \
		"mov ar.ccv = r0\n" \
		"dep r29 = -1, r0, 31, 1;;\n" \
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
	(result == 0); \
})
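
/*
 * On little-endian ia64 the write_lock bitfield occupies the most significant
 * byte of the lock word, i.e. byte offset 3.  Storing zero to that single byte
 * with release semantics drops the writer bit without touching the read count.
 */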
static inline void _raw_write_unlock(rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */
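
/*
 * C-only fallback: ia64_dep_mi(-1, 0, 31, 1) builds the writer bit (bit 31);
 * wait for the lock word to drain to zero, then cmpxchg the writer bit in.
 */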
#define _raw_write_lock(l) \
({ \
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
	do { \
		while (*ia64_write_lock_ptr) \
			ia64_barrier(); \
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
	} while (ia64_val); \
})

#define _raw_write_trylock(rw) \
({ \
	__u64 ia64_val; \
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
	(ia64_val == 0); \
})

static inline void _raw_write_unlock(rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)

#endif /* _ASM_IA64_SPINLOCK_H */