#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
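
/*
 * Illustrative sketch (not part of this header, hence the #if 0): setting
 * a flag bit in a word shared between contexts. MY_FLAG_PENDING and
 * my_flags below are hypothetical names, not kernel APIs.
 */
#if 0
#define MY_FLAG_PENDING 0               /* hypothetical bit number */
static unsigned long my_flags;          /* hypothetical shared flags word */

static void mark_pending(void)
{
        /* LOCK-prefixed bts: safe against concurrent updates of my_flags */
        set_bit(MY_FLAG_PENDING, &my_flags);
}
#endif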
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
        asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
{
        barrier();
        clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile void *addr)
{
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
{
        barrier();
        __clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
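
/*
 * Illustrative sketch (not part of this header): the barrier usage that
 * the clear_bit() kernel-doc above asks for when the bit is used for
 * locking. MY_LOCK_BIT and my_state are hypothetical names.
 */
#if 0
#define MY_LOCK_BIT 0                   /* hypothetical lock bit */
static unsigned long my_state;          /* hypothetical state word */

static void my_release(void)
{
        smp_mb__before_clear_bit();     /* make prior stores visible first */
        clear_bit(MY_LOCK_BIT, &my_state);
}
#endif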
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
        asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
                     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
        return test_and_set_bit(nr, addr);
}
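
/*
 * Illustrative sketch (not part of this header): the bit-lock pattern
 * built from test_and_set_bit_lock()/clear_bit_unlock(). MY_LOCK_BIT
 * and my_state are hypothetical names.
 */
#if 0
#define MY_LOCK_BIT 0                   /* hypothetical lock bit */
static unsigned long my_state;          /* hypothetical state word */

static void my_lock(void)
{
        /* spin until the old value was 0, i.e. we acquired the bit */
        while (test_and_set_bit_lock(MY_LOCK_BIT, &my_state))
                cpu_relax();
}

static void my_unlock(void)
{
        clear_bit_unlock(MY_LOCK_BIT, &my_state);
}
#endif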
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm("bts %2,%1\n\t"
            "sbb %0,%0"
            : "=r" (oldbit), ADDR
            : "Ir" (nr));
        return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile("btr %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR
                     : "Ir" (nr));
        return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile("btc %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR
                     : "Ir" (nr) : "memory");
        return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}

static inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr % BITS_PER_LONG)) &
                (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
        int oldbit;

        asm volatile("bt %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit)
                     : "m" (*(unsigned long *)addr), "Ir" (nr));

        return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)                      \
        (__builtin_constant_p((nr))             \
         ? constant_test_bit((nr), (addr))      \
         : variable_test_bit((nr), (addr)))
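
/*
 * Illustrative sketch (not part of this header): test_bit() dispatches on
 * whether the bit number is a compile-time constant. my_flags and the
 * parameter n below are hypothetical.
 */
#if 0
static unsigned long my_flags;          /* hypothetical flags word */

static int test_bit_example(int n)
{
        int a = test_bit(3, &my_flags); /* constant nr -> constant_test_bit() */
        int b = test_bit(n, &my_flags); /* variable nr -> variable_test_bit() */

        return a + b;
}
#endif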
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        asm("bsf %1,%0"
            : "=r" (word)
            : "rm" (word));
        return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        asm("bsf %1,%0"
            : "=r" (word)
            : "r" (~word));
        return word;
}
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        asm("bsr %1,%0"
            : "=r" (word)
            : "rm" (word));
        return word;
}
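
/*
 * Illustrative sketch (not part of this header): expected results of the
 * zero-based bit scans above on sample values.
 */
#if 0
static void bitscan_examples(void)
{
        BUG_ON(__ffs(0x18UL) != 3);     /* 0x18 = 0b11000: lowest set bit */
        BUG_ON(__fls(0x18UL) != 4);     /* highest set bit */
        BUG_ON(ffz(0x07UL) != 3);       /* lowest clear bit of 0b0111 */
        /* all three are undefined for inputs with no matching bit */
}
#endif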
#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
        int r;
#ifdef CONFIG_X86_CMOV
        asm("bsfl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=r" (r) : "rm" (x), "r" (-1));
#else
        asm("bsfl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}
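
/*
 * Illustrative sketch (not part of this header): unlike __ffs(), ffs()
 * is 1-based and defined for zero input.
 */
#if 0
static void ffs_examples(void)
{
        BUG_ON(ffs(0) != 0);            /* no set bit -> 0 */
        BUG_ON(ffs(0x10) != 5);         /* bit 4 set -> position 5 */
}
#endif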
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
        int r;
#ifdef CONFIG_X86_CMOV
        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
#else
        asm("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}
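
/*
 * Illustrative sketch (not part of this header): fls() is likewise
 * 1-based and defined for zero input.
 */
#if 0
static void fls_examples(void)
{
        BUG_ON(fls(0) != 0);            /* no set bit -> 0 */
        BUG_ON(fls(0x10) != 5);         /* highest set bit 4 -> position 5 */
        BUG_ON(fls(-1) != 32);          /* bit 31 set -> position 32 */
}
#endif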
#endif /* __KERNEL__ */

#undef ADDR

static inline void set_bit_string(unsigned long *bitmap,
                                  unsigned long i, int len)
{
        unsigned long end = i + len;

        while (i < end) {
                __set_bit(i, bitmap);
                i++;
        }
}
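
/*
 * Illustrative sketch (not part of this header): set_bit_string() sets
 * len consecutive bits non-atomically, starting at bit i. DECLARE_BITMAP
 * and bitmap_zero() are the usual kernel helpers and assume the caller
 * has <linux/bitmap.h> available.
 */
#if 0
static void set_bit_string_example(void)
{
        DECLARE_BITMAP(map, 64);

        bitmap_zero(map, 64);
        set_bit_string(map, 4, 3);      /* sets bits 4, 5 and 6 */
}
#endif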
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr)                     \
        test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)                   \
        test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */

#endif /* _ASM_X86_BITOPS_H */