#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
						 (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))
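
/*
 * A naturally aligned 64-bit store is a single, atomic instruction on
 * x86-64, so a plain assignment is all set_64bit() needs here.
 */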
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effects, so the volatile attribute is necessary,
 *	   but generally the primitive is invalid because *ptr is an output
 *	   argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
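
/*
 * Illustrative use of xchg() (hypothetical names, not part of this file):
 * atomically publish a new value and pick up whatever was there before.
 *
 *	unsigned long prev = xchg(&shared_state, NEW_STATE);
 */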

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
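
/*
 * CMPXCHG compares the accumulator register with the memory operand: on a
 * match it stores the source operand, otherwise it loads the memory value
 * into the accumulator.  That is why 'old' is tied to the accumulator via
 * the "0" input constraint and the previous value comes back through "=a".
 */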
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
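
/*
 * The _local variant omits the lock prefix, so it is atomic only with
 * respect to the current CPU (e.g. against interrupts).  It is intended
 * for per-CPU data, not for memory shared between CPUs.
 */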
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
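
/*
 * Typical cmpxchg() pattern (illustrative sketch; 'counter' is a
 * hypothetical variable): retry until no other CPU has changed the value
 * between the read and the update.
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */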

#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})

#endif /* _ASM_X86_CMPXCHG_64_H */