atomic.h

/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_SMP
#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
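/*
 * The SMP variants below drive the MN10300 atomic operations unit:
 * storing an address to _AAR opens an atomic sequence on that word,
 * _ADR reads and writes the data, and a subsequent read of _ASR is
 * non-zero if the sequence was disturbed by another CPU, in which
 * case the whole operation is retried (hence the "bne 1b" loops).
 */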
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long status;
	unsigned long oldval;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	mov	%5,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(oldval), "=m"(*m)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
		: "memory", "cc");

	return oldval;
}

static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long status;
	unsigned long oldval;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	cmp	%5,%1		\n"
		"	bne	2f		\n"
		"	mov	%6,(_ADR,%3)	\n"
		"2:	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(oldval), "=m"(*m)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
		  "r"(old), "r"(new)
		: "memory", "cc");

	return oldval;
}

#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
#error "No SMP atomic operation support!"
#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
#else  /* CONFIG_SMP */

/*
 * Emulate xchg for non-SMP MN10300
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long oldval;
	unsigned long flags;

	flags = arch_local_cli_save();
	oldval = *m;
	*m = val;
	arch_local_irq_restore(flags);
	return oldval;
}

/*
 * Emulate cmpxchg for non-SMP MN10300
 */
static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long oldval;
	unsigned long flags;

	flags = arch_local_cli_save();
	oldval = *m;
	if (oldval == old)
		*m = new;
	arch_local_irq_restore(flags);
	return oldval;
}

#endif /* CONFIG_SMP */

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
					(unsigned long)(o),	\
					(unsigned long)(n)))

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
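
/*
 * Illustrative use of cmpxchg() (not part of this header): a lock-free
 * saturating increment on a plain unsigned long.  The load/compare/store
 * is retried whenever another CPU changed the word in between.  The
 * function and variable names here are hypothetical.
 *
 *	static void bump_saturating(volatile unsigned long *ctr)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = *ctr;
 *			if (old == ULONG_MAX)
 *				return;
 *		} while (cmpxchg(ctr, old, old + 1) != old);
 *	}
 */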

#endif /* !__ASSEMBLY__ */

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	(ACCESS_ONCE((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
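
/*
 * Illustrative use (not part of this header): a counter may be
 * initialised statically with ATOMIC_INIT() or at run time with
 * atomic_set().  "foo_count" is a hypothetical name.
 *
 *	static atomic_t foo_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&foo_count, 42);
 *	BUG_ON(atomic_read(&foo_count) != 42);
 */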

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	add	%5,%1		\n"
		"	mov	%1,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	retval = v->counter;
	retval += i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}
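
/*
 * Illustrative use (not part of this header): atomic_add_return()
 * yields the post-addition value, so it can hand out unique,
 * monotonically increasing IDs without locking.  "next_id" and
 * "alloc_id" are hypothetical names; remember the guaranteed useful
 * range is only 24 bits.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	static int alloc_id(void)
 *	{
 *		return atomic_add_return(1, &next_id);
 *	}
 */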

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	sub	%5,%1		\n"
		"	mov	%1,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	retval = v->counter;
	retval -= i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
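
/*
 * Illustrative use (not part of this header): a reference count where
 * a lookup must not resurrect an object that is already being torn
 * down.  atomic_inc_not_zero() takes a reference only if one is still
 * held, and atomic_dec_and_test() detects the final put.  "struct foo"
 * and its fields are hypothetical.
 *
 *	static struct foo *foo_get(struct foo *f)
 *	{
 *		return atomic_inc_not_zero(&f->refs) ? f : NULL;
 *	}
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refs))
 *			kfree(f);
 *	}
 */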

/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to the word in memory
 *
 * Atomically clears the bits set in @mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"
		"	and	%4,%0		\n"
		"	mov	%0,(_ADR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"	/* flush */
		"	mov	(_ASR,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
		: "memory", "cc");
#else
	unsigned long flags;

	mask = ~mask;
	flags = arch_local_cli_save();
	*addr &= mask;
	arch_local_irq_restore(flags);
#endif
}

/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to the word in memory
 *
 * Atomically sets the bits set in @mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"
		"	or	%4,%0		\n"
		"	mov	%0,(_ADR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"	/* flush */
		"	mov	(_ASR,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	*addr |= mask;
	arch_local_irq_restore(flags);
#endif
}
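
/*
 * Illustrative use (not part of this header): flipping flag bits in a
 * shared status word without a lock.  "status" and the FLAG_* values
 * are hypothetical.
 *
 *	#define FLAG_BUSY	0x00000001UL
 *	#define FLAG_ERROR	0x00000002UL
 *
 *	static unsigned long status;
 *
 *	atomic_set_mask(FLAG_BUSY, &status);
 *	atomic_clear_mask(FLAG_BUSY | FLAG_ERROR, &status);
 */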

/* Atomic operations are already serializing on MN10300??? */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */