bitops.h 8.0 KB
  1. #ifndef _ASM_X86_BITOPS_H
  2. #define _ASM_X86_BITOPS_H
  3. /*
  4. * Copyright 1992, Linus Torvalds.
  5. */
  6. #ifndef _LINUX_BITOPS_H
  7. #error only <linux/bitops.h> can be included directly
  8. #endif
  9. #include <linux/compiler.h>
  10. #include <asm/alternative.h>
  11. /*
  12. * These have to be done with inline assembly: that way the bit-setting
  13. * is guaranteed to be atomic. All bit operations return 0 if the bit
  14. * was cleared before the operation and != 0 if it was not.
  15. *
  16. * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  17. */
/*
 * ADDR is the memory operand handed to the bit-test/-set instructions
 * below; it refers to the word(s) starting at `addr`.  The operand is
 * really read-modify-write ("+m"), but gcc older than 4.1 chokes on
 * that form, so those compilers get the technically-wrong "=m".
 */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
	/* LOCK makes the BTS read-modify-write atomic across CPUs; the
	 * "memory" clobber stops the compiler from caching memory
	 * contents across the operation. */
	asm volatile(LOCK_PREFIX "bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
	/* Same BTS as set_bit(), just without the LOCK prefix. */
	asm volatile("bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
	/* Deliberately no "memory" clobber here: callers needing
	 * ordering use the smp_mb__*_clear_bit() barriers, as the
	 * kernel-doc above says. */
	asm volatile(LOCK_PREFIX "btr %1,%0"
		     : ADDR
		     : "Ir" (nr));
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
{
	/* Compiler barrier first so no prior accesses sink below the
	 * unlocking store. */
	barrier();
	clear_bit(nr, addr);
}
/* Non-atomic counterpart of clear_bit(): plain BTR, no LOCK prefix. */
static inline void __clear_bit(int nr, volatile void *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads.  Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
{
	/* Compiler barrier only; hardware ordering is covered by the
	 * rationale above. */
	barrier();
	__clear_bit(nr, addr);
}
/*
 * clear_bit() itself carries no barrier (see its kernel-doc); callers
 * that use it for locking bracket it with these.  On x86 a compiler
 * barrier is all that is emitted.
 */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
	/* Plain BTC (complement), no LOCK prefix. */
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
	/* LOCK BTC: atomic read-modify-write toggle. */
	asm volatile(LOCK_PREFIX "btc %1,%0"
		     : ADDR : "Ir" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* BTS puts the old bit in CF; SBB reg,reg turns CF into
	 * all-ones (-1) or 0, so the return is nonzero iff the bit was
	 * previously set. */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86: the LOCKed operation
 * already provides the acquire ordering a lock needs.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
	return test_and_set_bit(nr, addr);
}
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* Plain asm (no volatile, no LOCK): the non-atomic variant may
	 * be optimized/reordered by the compiler.  CF -> oldbit via SBB
	 * as in test_and_set_bit(). */
	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));

	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* LOCK BTR clears the bit atomically; SBB materializes the old
	 * bit (CF) as the return value. */
	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* NOTE(review): uses asm volatile although the sibling
	 * __test_and_set_bit() uses plain asm — presumably deliberate
	 * conservatism; confirm before unifying. */
	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));

	return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* Non-atomic BTC; old bit comes back through CF/SBB.
	 * NOTE(review): this variant carries a "memory" clobber unlike
	 * the other non-atomic test_and_* ops — looks conservative;
	 * confirm before changing. */
	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* LOCK BTC toggles atomically; SBB converts CF (old bit) into
	 * the 0 / nonzero return value. */
	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
  248. static inline int constant_test_bit(int nr, const volatile void *addr)
  249. {
  250. return ((1UL << (nr % BITS_PER_LONG)) &
  251. (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
  252. }
/*
 * variable_test_bit - test a bit, used when @nr is not a compile-time
 * constant; BT handles the word-offset arithmetic in hardware.
 */
static inline int variable_test_bit(int nr, volatile const void *addr)
{
	int oldbit;

	/* BT copies the addressed bit into CF without writing memory;
	 * SBB turns CF into the 0 / nonzero result. */
	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/* Dispatch on whether @nr is a compile-time constant: constant bits are
 * resolved in C, variable bits use the BT instruction. */
#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
  274. #undef ADDR
  275. #ifdef CONFIG_X86_32
  276. # include "bitops_32.h"
  277. #else
  278. # include "bitops_64.h"
  279. #endif
  280. #endif /* _ASM_X86_BITOPS_H */