cmpxchg_32.h

#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */
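
/*
 * Illustrative sketch (not part of the original header): a caller that may
 * run on CPUs without CMPXCHG8B is expected to check the feature bit first,
 * e.g. via boot_cpu_data / the cpu_has_cx8 helper, and take another path
 * otherwise. The variable and fallback names below are hypothetical:
 *
 *	if (cpu_has_cx8)
 *		set_64bit(&shared_val, new_val);
 *	else
 *		store_64bit_fallback(&shared_val, new_val);
 */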

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers; otherwise it acts as a read and gives us the
 * "new previous" value. That is why there is a loop. Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically. We need the reader side to
 * see a coherent 64-bit value.
 */

static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
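
/*
 * Illustrative sketch (not part of the original header): in plain C the
 * asm loop above behaves roughly like the following, using __cmpxchg64()
 * from further down in this file. On failure cmpxchg8b reloads EDX:EAX
 * with the value it found in memory, which becomes the next "expected"
 * value, so preloading prev makes the first attempt likely to succeed:
 *
 *	u64 prev = *ptr;
 *	u64 seen;
 *
 *	while ((seen = __cmpxchg64(ptr, prev, value)) != prev)
 *		prev = seen;
 */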

#define __HAVE_ARCH_CMPXCHG 1

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
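
/*
 * Illustrative sketch (not part of the original header): callers typically
 * use cmpxchg64() in a retry loop. A hypothetical atomic 64-bit add on a
 * shared counter could look like this:
 *
 *	static u64 counter;
 *
 *	static inline void counter_add(u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = counter;
 *			new = old + delta;
 *		} while (cmpxchg64(&counter, old, new) != old);
 *	}
 *
 * cmpxchg64() returns the value that was in memory before the attempt, so
 * the swap succeeded exactly when that return value equals "old". Even if
 * the plain read of counter tears on 32-bit, the mismatch just causes one
 * more trip around the loop.
 */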

#ifndef CONFIG_X86_CMPXCHG64
/*
 * When building a kernel capable of running on the 80386 and 80486, it may
 * be necessary to emulate cmpxchg8b, since those CPUs do not implement the
 * instruction.
 */
#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif
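
/*
 * Illustrative sketch (assumption, not part of this header): cmpxchg8b_emu
 * is an out-of-line assembly routine patched in by alternative_io() when
 * the CPU lacks X86_FEATURE_CX8. At the C level it behaves roughly like
 * the function below, relying on interrupt disabling instead of a locked
 * instruction, which is only viable on the uniprocessor 386/486 systems
 * this path targets. The function name here is hypothetical:
 *
 *	u64 cmpxchg8b_emu_sketch(volatile u64 *ptr, u64 old, u64 new)
 *	{
 *		unsigned long flags;
 *		u64 prev;
 *
 *		local_irq_save(flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */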

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */