atomic_32.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops. Each instance of this
 * struct will be homed on a different CPU.
 */
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to lock for the given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
	__write_once = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}
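
/*
 * Illustrative sketch, not part of the original build: in the
 * !ATOMIC_LOCKS_FOUND_VIA_TABLE() case, the "mm" bit-field merge above
 * is equivalent to plain array indexing on the page-offset bits of the
 * address (relying on atomic_locks being page aligned, as declared above):
 *
 *	unsigned long index = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *	return &atomic_locks[index];
 *
 * i.e. two 4-byte atomics in the same naturally aligned 8-byte block
 * hash to the same lock.
 */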

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}
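
/*
 * Calling-pattern sketch (illustrative): each wrapper below resolves
 * the hashed lock first, then hands both pointers to the low-level
 * routine, e.g.
 *
 *	int *lock = __atomic_setup(v);
 *	ret = __atomic_xchg(&v->counter, lock, n).val;
 *
 * Priming the cache line before the lock is taken should shorten the
 * window during which the hashed lock is held.
 */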

int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
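
/*
 * Usage sketch (illustrative, following the usual kernel convention):
 * the value returned is the old value, so a header-level wrapper can
 * turn it into a "did we add?" flag by comparing against the forbidden
 * value:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return _atomic_xchg_add_unless(v, a, u) != u;
 *	}
 */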

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);
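
/*
 * These three mask operations back the architecture's atomic bitops;
 * "andn" is and-with-complement. A sketch of a clear_bit()-style
 * consumer, assuming the generic BIT_WORD()/BIT_MASK() helpers:
 *
 *	static inline void clear_bit(unsigned nr, volatile unsigned long *p)
 *	{
 *		_atomic_andn(p + BIT_WORD(nr), BIT_MASK(nr));
 *	}
 */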

u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
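
/*
 * Consumer sketch (illustrative): a futex-style caller is expected to
 * check the .err field of the returned struct before trusting .val:
 *
 *	struct __get_user g = __atomic_xchg(uaddr, lock, newval);
 *	if (g.err)
 *		return g.err;	(i.e. the -EFAULT set just above)
 */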

#if CHIP_HAS_CBOX_HOME_MAP()
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;
}
__setup("noatomichash", noatomichash);
#endif

void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, we just have one lock for
	 * all atomic objects/operations.  Here we replace the
	 * elements of atomic_lock_ptr so that they point at per_cpu
	 * integers.  This seemingly over-complex approach stems from
	 * the fact that DEFINE_PER_CPU defines an entry for each cpu
	 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
	 * for efficient hashing of atomics to their locks we want a
	 * compile time constant power of 2 for the size of this
	 * table, so we use ATOMIC_HASH_SIZE.
	 *
	 * Here we populate atomic_lock_ptr from the per cpu
	 * atomic_lock_pool, interspersing by actual cpu so that
	 * subsequent elements are homed on consecutive cpus.
	 */
	actual_cpu = cpumask_first(cpu_possible_mask);
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);
		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}
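
	/*
	 * Worked example (illustrative): with four possible cpus and an
	 * eight-entry L1 table, the pre-increment walk above assigns
	 * homes 1, 2, 3, 0, 1, 2, 3, 0, so cpu 0 hosts no more lock
	 * blocks than any other cpu.
	 */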
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
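
	/*
	 * Worked example (illustrative): with 64KB pages, PAGE_SIZE >> 3
	 * gives 8192 possible lock indices, so ATOMIC_HASH_SIZE must be
	 * at least 8192; 8192 ints occupy 32KB, which also satisfies the
	 * one-page bound checked above.
	 */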
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
}