#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
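
/*
 * Illustrative sketch (not part of this header): on 32-bit x86 a plain
 * 64-bit store may be split into two 32-bit stores, so a writer that
 * publishes a 64-bit value visible to other CPUs could use set_64bit()
 * instead.  The variable and function names below are hypothetical.
 *
 *	static volatile u64 shared_stamp;
 *
 *	static void publish_stamp(u64 stamp)
 *	{
 *		set_64bit(&shared_stamp, stamp);
 *	}
 */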

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
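
/*
 * Illustrative sketch (not part of this header): cmpxchg64() returns the
 * previous value, so the usual pattern is a retry loop that re-uses the
 * returned value on failure.  The counter and function names below are
 * hypothetical.
 *
 *	static u64 example_add64(volatile u64 *ctr, u64 delta)
 *	{
 *		u64 old = *ctr, prev;
 *
 *		for (;;) {
 *			prev = cmpxchg64(ctr, old, old + delta);
 *			if (prev == old)
 *				return old + delta;
 *			old = prev;
 *		}
 *	}
 */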

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
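
/*
 * Illustrative sketch (not part of this header): when built without
 * CONFIG_X86_CMPXCHG, the cmpxchg() above selects the 80386 software
 * fallback at run time; either way callers write the same generic
 * compare-and-swap loop.  The flag word and names below are hypothetical.
 *
 *	static int example_set_flag(unsigned int *word, unsigned int bit)
 *	{
 *		unsigned int old = *word, prev;
 *
 *		for (;;) {
 *			if (old & bit)
 *				return 0;
 *			prev = cmpxchg(word, old, old | bit);
 *			if (prev == old)
 *				return 1;
 *			old = prev;
 *		}
 *	}
 */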

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 and 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })
#endif
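
/*
 * Illustrative sketch (not part of this header): because the cmpxchg8b_emu
 * fallback is patched in via alternative_io(), callers do not test
 * X86_FEATURE_CX8 themselves.  cmpxchg64_local() omits the LOCK prefix, so
 * it is only suitable for data that other CPUs never touch, e.g. a per-CPU
 * counter; the names below are hypothetical.
 *
 *	static void example_bump_local(u64 *percpu_ctr)
 *	{
 *		u64 old = *percpu_ctr;
 *
 *		while (cmpxchg64_local(percpu_ctr, old, old + 1) != old)
 *			old = *percpu_ctr;
 *	}
 */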

#define cmpxchg8b(ptr, o1, o2, n1, n2)				\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"	\
		     : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)	\
		     : "a" (__old1), "d"(__old2),		\
		       "b" (__new1), "c" (__new2)		\
		     : "memory");				\
	__ret; })

#define cmpxchg8b_local(ptr, o1, o2, n1, n2)			\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile("cmpxchg8b %2; setz %1"			\
		     : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)	\
		     : "a" (__old1), "d"(__old2),		\
		       "b" (__new1), "c" (__new2)		\
		     : "memory");				\
	__ret; })

#define cmpxchg_double(ptr, o1, o2, n1, n2)			\
({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
	VM_BUG_ON((unsigned long)(ptr) % 8);			\
	cmpxchg8b((ptr), (o1), (o2), (n1), (n2));		\
})

#define cmpxchg_double_local(ptr, o1, o2, n1, n2)		\
({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);			\
	VM_BUG_ON((unsigned long)(ptr) % 8);			\
	cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));		\
})

#define system_has_cmpxchg_double() cpu_has_cx8
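
/*
 * Illustrative sketch (not part of this header): cmpxchg_double() swaps two
 * adjacent 32-bit words as one 8-byte unit, so the pair must be 8-byte
 * aligned and the caller should check system_has_cmpxchg_double() first.
 * It returns non-zero on success.  The structure and names below are
 * hypothetical.
 *
 *	struct example_pair {
 *		unsigned int lo;
 *		unsigned int hi;
 *	} __attribute__((aligned(8)));
 *
 *	static int example_update(struct example_pair *p,
 *				  unsigned int old_lo, unsigned int old_hi,
 *				  unsigned int new_lo, unsigned int new_hi)
 *	{
 *		if (!system_has_cmpxchg_double())
 *			return 0;
 *		return cmpxchg_double(&p->lo, old_lo, old_hi, new_lo, new_hi);
 *	}
 */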

#endif /* _ASM_X86_CMPXCHG_32_H */