atomic_32.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <asm/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}
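
/*
 * Illustrative sketch (added, not in the original header): a common use
 * of atomic_xchg() is a one-shot "claim" flag, where only the first
 * caller to flip 0 -> 1 sees the old value 0. The names below are
 * hypothetical; ATOMIC_INIT() comes from the generic <asm/atomic.h>
 * that wraps this file.
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	static int try_claim(void)
 *	{
 *		return atomic_xchg(&claimed, 1) == 0;
 *	}
 */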

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
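
/*
 * Illustrative sketch (added, not in the original header): the usual
 * cmpxchg retry loop, here keeping a running maximum. atomic_max() is
 * a hypothetical helper; atomic_read() comes from the generic
 * <asm/atomic.h> that wraps this file.
 *
 *	static inline void atomic_max(atomic_t *v, int n)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < n) {
 *			int seen = atomic_cmpxchg(v, old, n);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 *
 * If another CPU changed @v between the read and the cmpxchg, the
 * returned value differs from "old" and the loop retries with it.
 */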

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}
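
/*
 * Illustrative sketch (added, not in the original header): the classic
 * consumer of atomic_add_unless() is a "take a reference unless the
 * count already hit zero" helper. The struct and function names are
 * hypothetical.
 *
 *	struct obj {
 *		atomic_t refcount;
 *	};
 *
 *	static inline int obj_tryget(struct obj *o)
 *	{
 *		return atomic_add_unless(&o->refcount, 1, 0);
 *	}
 */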

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since the store would be
 * lost if it fell between the load and store of one of the other
 * atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
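
/*
 * Added illustration of the race described above, assuming atomic_set()
 * were a plain store while another CPU runs atomic_add() (which, on
 * this platform, takes a lock and then does a separate load and store):
 *
 *	CPU 0: atomic_add(1, v)		CPU 1: plain store of 100
 *	load v->counter (sees 5)
 *					v->counter = 100
 *	store 5 + 1 = 6			(the store of 100 is lost)
 *
 * Routing atomic_set() through _atomic_xchg() makes it acquire the same
 * lock as every other atomic op on the word, so this interleaving
 * cannot happen.
 */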

#define xchg(ptr, x) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
	 __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
	 __cmpxchg_called_with_bad_pointer()))
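
/*
 * Illustrative sketch (added, not in the original header): these macros
 * work on any 4-byte integer lvalue, not just atomic_t. Other sizes
 * resolve to __xchg_called_with_bad_pointer(), which is never defined,
 * so misuse is caught at link time.
 *
 *	static int state;
 *
 *	int old = xchg(&state, 1);
 *	if (cmpxchg(&state, 1, 2) == 1)
 *		... we performed the 1 -> 2 transition ...
 */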

/* A 64-bit atomic type */
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since the store would be
 * lost if it fell between the load and store of one of the other
 * atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
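
/*
 * Illustrative sketch (added, not in the original header): the derived
 * macros compose the primitives above; for example, a 64-bit refcount
 * on a hypothetical object could use:
 *
 *	static atomic64_t refs = ATOMIC64_INIT(1);
 *
 *	if (atomic64_inc_not_zero(&refs))
 *		... got a reference; the object cannot go away under us ...
 *
 *	if (atomic64_dec_and_test(&refs))
 *		... last reference dropped; free the object ...
 */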

/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines just acquire the lock with a "tns" (test-and-set) instruction
 * and then do a read/modify/write of the word. But after the word is
 * updated, the routine issues an "mf" (memory fence) before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
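
/*
 * Added illustration of the asymmetry above, with a hypothetical object
 * whose final state must be visible before the count drops:
 *
 *	obj->state = DEAD;
 *	smp_mb__before_atomic_dec();	real barrier: orders the store
 *	atomic_dec(&obj->refcount);
 *	smp_mb__after_atomic_dec();	no-op: the helper ends with "mf"
 */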

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
	(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT	6
#define ATOMIC_HASH_L1_SIZE	(1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT	(CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE	(1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
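
/*
 * Added sketch (hypothetical, for illustration): in the atomic_locks[]
 * configuration, the lock guarding a word is chosen by hashing the
 * word's address, conceptually something like:
 *
 *	static inline int *lock_for(volatile int *p)
 *	{
 *		unsigned long hash =
 *			((unsigned long)p >> 2) & (ATOMIC_HASH_SIZE - 1);
 *		return &atomic_locks[hash];
 *	}
 *
 * The exact mapping lives in the arch's atomic support code; what
 * matters is that every routine touching a given word picks the same
 * lock. Skipping the low two bits works because the word is 4-byte
 * aligned.
 */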

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG		20
#define ATOMIC_LOCK_REG_NAME	r20

#ifndef __ASSEMBLY__

/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);
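
/*
 * Added note: returning struct __get_user presumably lets the 32-bit
 * helpers hand back both the resulting value and a fault indication,
 * so they can also be used on user memory (e.g. for futex operations)
 * where the access may legitimately fault. The 64-bit helpers return a
 * plain u64, so they are suitable only for kernel memory.
 */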

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */