/* bitops.h */
#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr) : "memory");
}
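/*
 * Usage sketch (illustrative, not part of the original header): marking a
 * device busy from any context.  The flag word and bit number are
 * hypothetical.
 */
#if 0
static unsigned long dev_flags;
#define DEV_BUSY	0	/* bit number within dev_flags */

static void mark_busy(void)
{
	set_bit(DEV_BUSY, &dev_flags);	/* atomic against other CPUs */
}
#endif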
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
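/*
 * Usage sketch (illustrative): releasing a bit used as a lock.  Since
 * clear_bit() is not a barrier by itself, writes made while holding the
 * lock must be ordered before the bit is cleared.  The names are
 * hypothetical.
 */
#if 0
static unsigned long lock_word;
#define LOCK_BIT	0

static void unlock(void)
{
	smp_mb__before_clear_bit();	/* order prior writes first */
	clear_bit(LOCK_BIT, &lock_word);
}
#endif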
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"dIr" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
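/*
 * Usage sketch (illustrative): a try-lock built on test_and_set_bit().
 * A returned old value of 0 means this caller set the bit and owns the
 * lock.  The names are hypothetical.
 */
#if 0
static unsigned long lock_word;
#define LOCK_BIT	0

static int trylock(void)
{
	return !test_and_set_bit(LOCK_BIT, &lock_word);
}
#endif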
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
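/*
 * Dispatch sketch (illustrative): with a compile-time-constant bit number
 * test_bit() expands to the pure-C constant_test_bit(); with a runtime
 * value it expands to the btl-based variable_test_bit().  The word below
 * is hypothetical.
 */
#if 0
static unsigned long w;

static int demo(int n)
{
	int a = test_bit(3, &w);	/* constant nr -> constant_test_bit() */
	int b = test_bit(n, &w);	/* runtime nr  -> variable_test_bit() */
	return a + b;
}
#endif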
#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);

/* return index of first bit set in val or max when no bit is set */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
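/*
 * Sketch (illustrative): when the bitmap size is a compile-time constant
 * no larger than one long, the wrappers above reduce to a single inlined
 * bsfq via __scanbit() instead of calling the out-of-line helpers.  The
 * bitmap below is hypothetical.
 */
#if 0
static unsigned long mask;	/* single-word, 64-bit bitmap */

static unsigned long first_free(void)
{
	/* constant size 64 <= BITS_PER_LONG: inlines to __scanbit(~mask, 64) */
	return find_first_zero_bit(&mask, 64);
}
#endif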
/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
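/*
 * Usage sketch (illustrative): claiming a run of len free bits, the
 * pattern used by range allocators built on these helpers.  All names
 * are hypothetical.
 */
#if 0
static unsigned long map[4];	/* 256-bit bitmap */

static unsigned long alloc_range(int len)
{
	unsigned long off = find_next_zero_string(map, 0, 256, len);
	if (off != -1UL)
		set_bit_string(map, off, len);	/* mark the run allocated */
	return off;	/* -1UL when no run was found */
}
#endif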
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
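/*
 * Usage sketch (illustrative): both helpers are undefined when no
 * qualifying bit exists, so callers guard the input as the comments
 * above require.
 */
#if 0
static unsigned long first_zero_or_64(unsigned long w)
{
	return (w == ~0UL) ? 64 : ffz(w);	/* ffz(~0UL) is undefined */
}
#endif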
#ifdef __KERNEL__

/*
 * The scheduler's priority bitmap spans three 64-bit words here, and at
 * least one bit is guaranteed to be set, so only the first two words
 * need an explicit test.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
}
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}
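/*
 * Semantics sketch (illustrative): like libc ffs(), the result is 1-based
 * and 0 means "no bit set", unlike the 0-based __ffs() above:
 *
 *	ffs(0)    == 0
 *	ffs(1)    == 1
 *	ffs(0x80) == 8
 */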
/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
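/* e.g. hweight8(0xF0) == 4 and hweight32(0x80000001) == 2 (illustrative) */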
#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

/* find last set bit */
#define fls(x) generic_fls(x)

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */