cmpxchg.h

#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
        __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
        __compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
        __compiletime_error("Bad argument size for add");
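
/*
 * Illustrative sketch, not part of the original header: the same
 * "undefined extern + __compiletime_error()" pattern in isolation.
 * Assuming a GCC toolchain where __compiletime_error() expands to
 * __attribute__((error(...))), a call that the optimizer cannot prove
 * dead is rejected at compile time; on toolchains without that
 * attribute the call survives into the object file and fails at link
 * time instead, because the function is never defined anywhere.
 * __demo_wrong_size() and demo_op() are made-up names for illustration:
 *
 *      extern void __demo_wrong_size(void)
 *              __compiletime_error("Bad argument size for demo_op");
 *
 *      #define demo_op(ptr)                            \
 *      ({                                              \
 *              if (sizeof(*(ptr)) != 4)                \
 *                      __demo_wrong_size();            \
 *              *(ptr);                                 \
 *      })
 */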

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B    1
#define __X86_CASE_W    2
#define __X86_CASE_L    4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q    8
#else
#define __X86_CASE_Q    -1              /* sizeof will never return -1 */
#endif
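
/*
 * Illustrative sketch, not part of the original header: how the -1 case
 * makes unsupported sizes fail. Because the "size" argument is always a
 * compile-time constant (sizeof), the compiler folds each size switch
 * below down to a single case. On a 32-bit build an 8-byte operand
 * matches no case (__X86_CASE_Q is -1 there), so only the default branch
 * survives and its call to the undefined __xchg_wrong_size() breaks the
 * build:
 *
 *      switch (sizeof(u64)) {          // constant, folded at compile time
 *      case __X86_CASE_B:              // 1: dead code, eliminated
 *      case __X86_CASE_W:              // 2: dead code, eliminated
 *      case __X86_CASE_L:              // 4: dead code, eliminated
 *      case __X86_CASE_Q:              // -1 on 32-bit, can never match 8
 *              break;
 *      default:
 *              __xchg_wrong_size();    // undefined symbol -> build error
 *      }
 */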

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size) \
({ \
        __typeof(*(ptr)) __x = (x); \
        switch (size) { \
        case __X86_CASE_B: \
        { \
                volatile u8 *__ptr = (volatile u8 *)(ptr); \
                asm volatile("xchgb %0,%1" \
                             : "=q" (__x), "+m" (*__ptr) \
                             : "0" (__x) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_W: \
        { \
                volatile u16 *__ptr = (volatile u16 *)(ptr); \
                asm volatile("xchgw %0,%1" \
                             : "=r" (__x), "+m" (*__ptr) \
                             : "0" (__x) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_L: \
        { \
                volatile u32 *__ptr = (volatile u32 *)(ptr); \
                asm volatile("xchgl %0,%1" \
                             : "=r" (__x), "+m" (*__ptr) \
                             : "0" (__x) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_Q: \
        { \
                volatile u64 *__ptr = (volatile u64 *)(ptr); \
                asm volatile("xchgq %0,%1" \
                             : "=r" (__x), "+m" (*__ptr) \
                             : "0" (__x) \
                             : "memory"); \
                break; \
        } \
        default: \
                __xchg_wrong_size(); \
        } \
        __x; \
})

#define xchg(ptr, v) \
        __xchg((v), (ptr), sizeof(*ptr))
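
/*
 * Illustrative usage sketch, not part of the original header: xchg()
 * atomically stores the new value and returns whatever was in memory
 * before, so it can hand ownership of a pointer to exactly one caller.
 * "pending_work" and "claim_pending" are made-up names for illustration:
 *
 *      static struct work *pending_work;
 *
 *      static struct work *claim_pending(void)
 *      {
 *              // Atomically take the pointer and leave NULL behind;
 *              // only one CPU can observe the non-NULL value.
 *              return xchg(&pending_work, NULL);
 *      }
 */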

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
        __typeof__(*(ptr)) __ret; \
        __typeof__(*(ptr)) __old = (old); \
        __typeof__(*(ptr)) __new = (new); \
        switch (size) { \
        case __X86_CASE_B: \
        { \
                volatile u8 *__ptr = (volatile u8 *)(ptr); \
                asm volatile(lock "cmpxchgb %2,%1" \
                             : "=a" (__ret), "+m" (*__ptr) \
                             : "q" (__new), "0" (__old) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_W: \
        { \
                volatile u16 *__ptr = (volatile u16 *)(ptr); \
                asm volatile(lock "cmpxchgw %2,%1" \
                             : "=a" (__ret), "+m" (*__ptr) \
                             : "r" (__new), "0" (__old) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_L: \
        { \
                volatile u32 *__ptr = (volatile u32 *)(ptr); \
                asm volatile(lock "cmpxchgl %2,%1" \
                             : "=a" (__ret), "+m" (*__ptr) \
                             : "r" (__new), "0" (__old) \
                             : "memory"); \
                break; \
        } \
        case __X86_CASE_Q: \
        { \
                volatile u64 *__ptr = (volatile u64 *)(ptr); \
                asm volatile(lock "cmpxchgq %2,%1" \
                             : "=a" (__ret), "+m" (*__ptr) \
                             : "r" (__new), "0" (__old) \
                             : "memory"); \
                break; \
        } \
        default: \
                __cmpxchg_wrong_size(); \
        } \
        __ret; \
})

#define __cmpxchg(ptr, old, new, size) \
        __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size) \
        __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size) \
        __raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif

#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new) \
        __cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new) \
        __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new) \
        __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
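
/*
 * Illustrative usage sketch, not part of the original header: the usual
 * compare-and-swap retry loop built on cmpxchg(). "counter" and
 * "saturating_inc" are made-up names; the loop re-reads the current
 * value whenever another CPU changed it between the read and the
 * cmpxchg, i.e. whenever the returned value differs from "old".
 *
 *      static unsigned int counter;
 *
 *      static void saturating_inc(void)
 *      {
 *              unsigned int old, new;
 *
 *              do {
 *                      old = counter;
 *                      if (old == UINT_MAX)
 *                              return;         // already saturated
 *                      new = old + 1;
 *              } while (cmpxchg(&counter, old, new) != old);
 *      }
 */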

#define __xadd(ptr, inc, lock) \
({ \
        __typeof__ (*(ptr)) __ret = (inc); \
        switch (sizeof(*(ptr))) { \
        case __X86_CASE_B: \
                asm volatile (lock "xaddb %b0, %1\n" \
                              : "+r" (__ret), "+m" (*(ptr)) \
                              : : "memory", "cc"); \
                break; \
        case __X86_CASE_W: \
                asm volatile (lock "xaddw %w0, %1\n" \
                              : "+r" (__ret), "+m" (*(ptr)) \
                              : : "memory", "cc"); \
                break; \
        case __X86_CASE_L: \
                asm volatile (lock "xaddl %0, %1\n" \
                              : "+r" (__ret), "+m" (*(ptr)) \
                              : : "memory", "cc"); \
                break; \
        case __X86_CASE_Q: \
                asm volatile (lock "xaddq %q0, %1\n" \
                              : "+r" (__ret), "+m" (*(ptr)) \
                              : : "memory", "cc"); \
                break; \
        default: \
                __xadd_wrong_size(); \
        } \
        __ret; \
})

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)     __xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)    __xadd((ptr), (inc), "")
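
/*
 * Illustrative usage sketch, not part of the original header: a
 * fetch-and-add ticket dispenser. "next_ticket" and "take_ticket" are
 * made-up names; each caller gets a distinct number because xadd()
 * returns the pre-increment contents atomically.
 *
 *      static unsigned int next_ticket;
 *
 *      static unsigned int take_ticket(void)
 *      {
 *              // Old value is returned; memory is incremented by 1.
 *              return xadd(&next_ticket, 1);
 *      }
 */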

#define __add(ptr, inc, lock) \
({ \
        __typeof__ (*(ptr)) __ret = (inc); \
        switch (sizeof(*(ptr))) { \
        case __X86_CASE_B: \
                asm volatile (lock "addb %b1, %0\n" \
                              : "+m" (*(ptr)) : "ri" (inc) \
                              : "memory", "cc"); \
                break; \
        case __X86_CASE_W: \
                asm volatile (lock "addw %w1, %0\n" \
                              : "+m" (*(ptr)) : "ri" (inc) \
                              : "memory", "cc"); \
                break; \
        case __X86_CASE_L: \
                asm volatile (lock "addl %1, %0\n" \
                              : "+m" (*(ptr)) : "ri" (inc) \
                              : "memory", "cc"); \
                break; \
        case __X86_CASE_Q: \
                asm volatile (lock "addq %1, %0\n" \
                              : "+m" (*(ptr)) : "ri" (inc) \
                              : "memory", "cc"); \
                break; \
        default: \
                __add_wrong_size(); \
        } \
        __ret; \
})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)       __add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)      __add((ptr), (inc), "lock; ")
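
/*
 * Illustrative usage sketch, not part of the original header: unlike
 * xadd(), add_smp()/add_sync() do not return the previous memory
 * contents, so they fit statistics-style counters where only the
 * in-memory total matters. "rx_bytes" and "account_rx" are made-up
 * names:
 *
 *      static unsigned long rx_bytes;
 *
 *      static void account_rx(unsigned long len)
 *      {
 *              add_smp(&rx_bytes, len);        // locked only when SMP
 *      }
 */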

#endif /* ASM_X86_CMPXCHG_H */