#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *)addr)
#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
#else
#define ADDR "+m" (*(volatile long *)addr)
#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
#endif
#define BASE_ADDR "m" (*(volatile int *)addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
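
/*
 * Example (illustrative sketch, not part of this header): marking a
 * descriptor busy in a driver-private bitmap. Because @nr indexes bits
 * across the whole array, one call addresses any of the 256 bits below.
 * busy_map and idx are hypothetical names:
 *
 *      unsigned long busy_map[256 / BITS_PER_LONG];
 *
 *      set_bit(idx, busy_map);
 *
 * The lock prefix makes the read-modify-write safe against other CPUs
 * touching the same word concurrently.
 */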

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
        asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
{
        barrier();
        clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile void *addr)
{
        asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
{
        barrier();
        __clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
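
/*
 * Example (hedged sketch): the two barrier macros above pair with
 * clear_bit() when a bit serves as a completion flag that another CPU
 * waits on. dev, FLAG_BUSY and waitq are hypothetical names:
 *
 *      clear_bit(FLAG_BUSY, &dev->flags);
 *      smp_mb__after_clear_bit();      order the clear before the wakeup
 *      wake_up(&dev->waitq);
 *
 * On x86 both macros compile to a plain compiler barrier(), because the
 * lock-prefixed instructions already order memory accesses.
 */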

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
        asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
        asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
                     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}
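
/*
 * Example (illustrative sketch): test_and_set_bit() gives a cheap
 * run-once guard across CPUs. init_done and do_expensive_init() are
 * hypothetical:
 *
 *      static unsigned long init_done;
 *
 *      if (!test_and_set_bit(0, &init_done))
 *              do_expensive_init();    only the first caller sees 0
 */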

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
        return test_and_set_bit(nr, addr);
}
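
/*
 * Example (hedged sketch): together with clear_bit_unlock() this forms a
 * minimal bit spinlock; real code should prefer <linux/bit_spinlock.h>.
 * LOCK_BIT and word are hypothetical:
 *
 *      while (test_and_set_bit_lock(LOCK_BIT, &word))
 *              cpu_relax();            acquire: spin until old value was 0
 *      ...critical section...
 *      clear_bit_unlock(LOCK_BIT, &word);      release
 */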

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile("bts %2,%3\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
        return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile("btr %2,%3\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
        return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile("btc %2,%3\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
        return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

        return oldbit;
}

static inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr % BITS_PER_LONG)) &
                (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
        int oldbit;

        asm volatile("bt %2,%3\n\t"
                     "sbb %0,%0"
                     : "=r" (oldbit)
                     : "m" (((volatile const int *)addr)[nr >> 5]),
                       "Ir" (nr), BASE_ADDR);

        return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)                      \
        (__builtin_constant_p((nr))             \
         ? constant_test_bit((nr), (addr))      \
         : variable_test_bit((nr), (addr)))
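
/*
 * Because of the __builtin_constant_p() dispatch above, test_bit() with a
 * compile-time constant @nr becomes a plain load-and-mask via
 * constant_test_bit(), while a runtime @nr emits a "bt" instruction via
 * variable_test_bit(). A short usage sketch (FLAG_NR, flags and
 * handle_flag() are hypothetical):
 *
 *      if (test_bit(FLAG_NR, &flags))
 *              handle_flag();
 */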

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        asm("bsf %1,%0"
            : "=r" (word)
            : "rm" (word));
        return word;
}
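
/*
 * Example (illustrative sketch): visiting every set bit of a nonzero word
 * with __ffs(), clearing the lowest set bit each round so the loop
 * terminates. process() is hypothetical:
 *
 *      while (word) {
 *              unsigned long bit = __ffs(word);
 *              process(bit);
 *              word &= word - 1;       clears the lowest set bit
 *      }
 */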

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        asm("bsf %1,%0"
            : "=r" (word)
            : "r" (~word));
        return word;
}
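
/*
 * Example (illustrative sketch): finding a free slot in a one-word
 * allocation bitmap. The ~0UL check guards the undefined all-ones case:
 *
 *      if (used != ~0UL) {
 *              slot = ffz(used);       first zero bit == first free slot
 *              __set_bit(slot, &used);
 *      }
 */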

/*
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        asm("bsr %1,%0"
            : "=r" (word)
            : "rm" (word));
        return word;
}

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
        int r;
#ifdef CONFIG_X86_CMOV
        asm("bsfl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=r" (r) : "rm" (x), "r" (-1));
#else
        asm("bsfl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}
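
/*
 * Worked example: ffs(0x58) == 4. 0x58 is binary 01011000, so bsfl finds
 * bit 3 (zero-based) and the function reports position 4. For x == 0,
 * bsfl leaves the destination undefined, the cmov/branch substitutes -1,
 * and -1 + 1 gives the documented return value 0.
 */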

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
        int r;
#ifdef CONFIG_X86_CMOV
        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
#else
        asm("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}
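
/*
 * Example: fls() doubles as an integer log2 when rounding up to a power
 * of two (sketch, assuming x > 1):
 *
 *      order = fls(x - 1);             then (1 << order) >= x
 *
 * Worked case: x == 5 gives fls(4) == 3 and 1 << 3 == 8 >= 5.
 */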
#endif /* __KERNEL__ */

#undef BASE_ADDR
#undef BIT_ADDR
#undef ADDR

static inline void set_bit_string(unsigned long *bitmap,
                                  unsigned long i, int len)
{
        unsigned long end = i + len;

        while (i < end) {
                __set_bit(i, bitmap);
                i++;
        }
}
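
/*
 * Example (hedged sketch): set_bit_string() suits reserving a contiguous
 * run in an allocation bitmap, e.g. IOMMU page slots. It loops over the
 * non-atomic __set_bit(), so the caller must hold whatever lock protects
 * the bitmap. iommu_map, start and npages are hypothetical:
 *
 *      set_bit_string(iommu_map, start, npages);
 */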

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr)                     \
        test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)                   \
        test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */