atomic64.c 3.8 KB

/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
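
/*
 * Map an atomic64_t's address onto one of the NR_LOCKS spinlocks:
 * discard the low cacheline-offset bits, fold the remaining address
 * bits together with xor, and mask the result down to the array size,
 * so distinct variables tend to spread across different locks.
 */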
static inline spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter = i;
	spin_unlock_irqrestore(lock, flags);
}

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter += a;
	spin_unlock_irqrestore(lock, flags);
}

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
}

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
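
/*
 * Decrement v only if the result would remain non-negative.  The
 * decremented value is returned whether or not it was stored, so a
 * caller checks for a result >= 0 to know the decrement happened.
 */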
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
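
/*
 * Compare-and-exchange: store n only if the current value equals o.
 * The value seen before any store is returned, so the exchange
 * succeeded exactly when the return value equals o.
 */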
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(lock, flags);
	return val;
}

/*
 * Add a to v unless v currently equals u.  Returns non-zero if the
 * add was performed, zero otherwise.
 */
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	int ret = 0;

	spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return ret;
}

static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);
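
/*
 * Illustrative usage sketch (not part of this file): on a 32-bit SMP
 * machine without 64-bit atomic instructions, a caller could keep a
 * 64-bit event counter with the operations defined above.  The names
 * "events", "count_event" and "read_events" are hypothetical.
 *
 *	static atomic64_t events = ATOMIC64_INIT(0);
 *
 *	static void count_event(void)
 *	{
 *		atomic64_add(1, &events);
 *	}
 *
 *	static long long read_events(void)
 *	{
 *		return atomic64_read(&events);
 *	}
 */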