atomic.h

#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)          ( (atomic_t) { (i) } )

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         ((v)->counter = (i))

#include <linux/compiler.h>
#include <asm/system.h>
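
/*
 * Illustrative usage sketch (not part of the original header; the
 * counter name is hypothetical):
 *
 *      static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *      atomic_set(&nr_active, 10);
 *      printk("%d active\n", atomic_read(&nr_active));
 *
 * atomic_read()/atomic_set() are plain accesses of the volatile
 * counter; only the read-modify-write operations below need the
 * SH-4A or IRQ-disable sequences.
 */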

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_add            \n"
"       add     %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp), "=r" (&v->counter)
        : "r" (i), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v += i;
        local_irq_restore(flags);
#endif
}

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_sub            \n"
"       sub     %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp), "=r" (&v->counter)
        : "r" (i), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v -= i;
        local_irq_restore(flags);
#endif
}
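
/*
 * The SH-4A paths above are a load-locked/store-conditional retry loop:
 * movli.l loads the counter and arms the link, movco.l stores the new
 * value only while the link is still intact (recording success in the
 * T bit), and "bf 1b" retries whenever the conditional store failed.
 * Roughly, in C pseudocode:
 *
 *      do {
 *              tmp = v->counter;
 *              tmp += i;
 *      } while (conditional store of tmp to &v->counter failed);
 */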

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long temp;

#ifdef CONFIG_CPU_SH4A
        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_add_return     \n"
"       add     %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
"       synco                                           \n"
        : "=&z" (temp), "=r" (&v->counter)
        : "r" (i), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp += i;
        *(long *)v = temp;
        local_irq_restore(flags);
#endif

        return temp;
}

#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long temp;

#ifdef CONFIG_CPU_SH4A
        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_sub_return     \n"
"       sub     %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
"       synco                                           \n"
        : "=&z" (temp), "=r" (&v->counter)
        : "r" (i), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp -= i;
        *(long *)v = temp;
        local_irq_restore(flags);
#endif

        return temp;
}
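
/*
 * Note: "synco" in the _return variants above is the SH-4A
 * synchronization (memory barrier) instruction; presumably it is what
 * gives the value-returning operations their ordering guarantees.
 */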

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
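
/*
 * Typical reference-count usage sketch (illustrative; the struct and
 * function names are hypothetical, not part of this header):
 *
 *      struct widget {
 *              atomic_t refcount;
 *      };
 *
 *      static void widget_get(struct widget *w)
 *      {
 *              atomic_inc(&w->refcount);
 *      }
 *
 *      static void widget_put(struct widget *w)
 *      {
 *              if (atomic_dec_and_test(&w->refcount))
 *                      kfree(w);
 *      }
 */

/*
 * atomic_cmpxchg(): compare the counter with @old and, only if they are
 * equal, replace it with @new; the previous value is returned either way.
 * Done here with interrupts disabled rather than with movli.l/movco.l.
 */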
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        local_irq_restore(flags);

        return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
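
/*
 * atomic_add_unless(): add @a to the counter unless it currently holds
 * @u; returns non-zero if the addition was performed.  Like
 * atomic_cmpxchg() above, this is implemented by disabling interrupts.
 */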
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        local_irq_save(flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        local_irq_restore(flags);

        return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
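
/*
 * atomic_inc_not_zero() is the usual "take a reference only if the
 * object is still live" helper.  Illustrative sketch (hypothetical
 * names, not part of this header):
 *
 *      if (!atomic_inc_not_zero(&w->refcount))
 *              return NULL;    (refcount already hit zero, object dying)
 *      return w;
 */

/*
 * atomic_clear_mask()/atomic_set_mask(): atomically AND the counter
 * with ~mask, respectively OR it with mask.
 */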
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_clear_mask     \n"
"       and     %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp), "=r" (&v->counter)
        : "r" (~mask), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v &= ~mask;
        local_irq_restore(flags);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%3, %0         ! atomic_set_mask       \n"
"       or      %2, %0                                  \n"
"       movco.l %0, @%3                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp), "=r" (&v->counter)
        : "r" (mask), "r" (&v->counter)
        : "t");
#else
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v |= mask;
        local_irq_restore(flags);
#endif
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */