/* arch x86 bitops.h */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

/*
 * Operand helpers for the asm statements below; each expands to a full
 * constraint-plus-expression pair naming memory at the caller's @addr:
 *
 *   ADDR      - the long at @addr, as a read/write output; used by the
 *               ops that treat @addr as the base of a long bitmap.
 *   BIT_ADDR  - the 32-bit word that actually contains bit @nr
 *               (index nr >> 5), as a read/write output, so gcc knows
 *               exactly which word the instruction modifies.
 *   BASE_ADDR - the first int at @addr, as a plain input; gives the
 *               bt/bts/btr/btc instruction a base to index from.
 *
 * gcc older than 4.1 rejects the "+m" (read/write memory) constraint,
 * so fall back to "=m" there even though the operand is read too.
 */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *)addr)
#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
#else
#define ADDR "+m" (*(volatile long *) addr)
#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
#endif
#define BASE_ADDR "m" (*(volatile int *)addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
	/*
	 * "memory" clobber: with a register bit offset, bts may write a
	 * word beyond the one named by the ADDR operand.
	 */
	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
	/* Same bts as set_bit(), just without the LOCK prefix. */
	asm volatile("bts %1,%0"
		     : ADDR
		     : "Ir" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
	/*
	 * BIT_ADDR names the dword really written (index nr >> 5) so gcc
	 * tracks the store; BASE_ADDR supplies the base btr indexes from.
	 */
	asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
{
	/*
	 * barrier() stops the compiler sinking critical-section accesses
	 * past the clearing store below.
	 */
	barrier();
	clear_bit(nr, addr);
}
/* Non-atomic variant of clear_bit(): plain btr, no LOCK prefix. */
static inline void __clear_bit(int nr, volatile void *addr)
{
	asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
{
	barrier();	/* compiler barrier only; see comment above */
	__clear_bit(nr, addr);
}

/*
 * Only compiler barriers are needed around clear_bit() on x86; see the
 * ordering notes above.
 */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
	asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
	asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was clear, nonzero (-1) if it was set.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* bts copies the old bit into CF; "sbb %0,%0" turns CF into 0/-1. */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
	/* The locked bts in test_and_set_bit() already acts as the lock op. */
	return test_and_set_bit(nr, addr);
}
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* Unlocked bts; old bit lands in CF, sbb expands it to 0/-1. */
	asm volatile("bts %2,%3\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);

	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was already clear, nonzero (-1) if it was set.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* btr copies the old bit into CF; sbb turns CF into 0/-1. */
	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* Unlocked btr; old bit lands in CF, sbb expands it to 0/-1. */
	asm volatile("btr %2,%3\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);

	return oldbit;
}
/**
 * __test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * WARNING: non atomic and it can be reordered!
 */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* Unlocked btc; old bit lands in CF, sbb expands it to 0/-1. */
	asm volatile("btc %2,%3\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);

	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was clear, nonzero (-1) if it was set.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	/* btc copies the old bit into CF; sbb turns CF into 0/-1. */
	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
  239. static inline int constant_test_bit(int nr, const volatile void *addr)
  240. {
  241. return ((1UL << (nr % BITS_PER_LONG)) &
  242. (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
  243. }
/**
 * variable_test_bit - Determine whether a bit is set (runtime @nr case)
 * @nr: bit number to test
 * @addr: Address to start counting from
 *
 * Returns 0 if the bit is clear, nonzero (-1) if it is set.
 */
static inline int variable_test_bit(int nr, volatile const void *addr)
{
	int oldbit;

	/*
	 * bt copies bit @nr into CF; sbb materializes CF as 0/-1.  The
	 * word holding the bit (index nr >> 5) is listed as an input so
	 * gcc knows it is read; BASE_ADDR gives bt its base.
	 */
	asm volatile("bt %2,%3\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (((volatile const int *)addr)[nr >> 5]),
		       "Ir" (nr), BASE_ADDR);

	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/*
 * Dispatch on whether @nr is a compile-time constant: the constant case
 * folds to a plain C expression, the variable case uses the bt insn.
 */
#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))

/* The asm operand helpers are private to this header. */
#undef BASE_ADDR
#undef BIT_ADDR
#undef ADDR
  269. #ifdef CONFIG_X86_32
  270. # include "bitops_32.h"
  271. #else
  272. # include "bitops_64.h"
  273. #endif
  274. #endif /* _ASM_X86_BITOPS_H */