#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/alternative.h>

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :ADDR
                :"dIr" (nr) : "memory");
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
        __asm__ volatile(
                "btsl %1,%0"
                :ADDR
                :"dIr" (nr) : "memory");
}
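
/*
 * Usage sketch (illustrative, not part of this header): set_bit() is safe
 * against concurrent updates of other bits in the same word, while
 * __set_bit() relies on the caller providing its own serialization.
 * The bitmap and bit numbers below are hypothetical.
 */
#if 0
static unsigned long example_flags[1];          /* hypothetical bitmap */

static void example_mark_ready(void)
{
        set_bit(0, example_flags);              /* atomic read-modify-write */
}

static void example_init_flags(void)
{
        /* single-threaded init path: the cheaper non-atomic form suffices */
        __set_bit(1, example_flags);
        __set_bit(2, example_flags);
}
#endif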
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :ADDR
                :"dIr" (nr));
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        barrier();
        clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__(
                "btrl %1,%0"
                :ADDR
                :"dIr" (nr));
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        barrier();
        __clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
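
/*
 * Sketch of the locking pattern the clear_bit() comment above refers to:
 * when a bit serves as a "busy"/lock flag, pair the clearing store with
 * smp_mb__before_clear_bit() so earlier writes are visible to other CPUs
 * before the bit is observed clear.  Names below are hypothetical.
 */
#if 0
static unsigned long example_state[1];          /* hypothetical flag word */
#define EXAMPLE_BUSY_BIT        0

static void example_release(void)
{
        /* ... updates published by the owner of the busy bit ... */
        smp_mb__before_clear_bit();
        clear_bit(EXAMPLE_BUSY_BIT, example_state);
}
#endif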
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__(
                "btcl %1,%0"
                :ADDR
                :"dIr" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
                :ADDR
                :"dIr" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
        return test_and_set_bit(nr, addr);
}
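
/*
 * Sketch of a simple bit lock built from the primitives above: acquire with
 * test_and_set_bit_lock(), release with clear_bit_unlock().  The flag word,
 * bit number and cpu_relax() spin loop are illustrative only.
 */
#if 0
static unsigned long example_lock_word[1];      /* hypothetical */
#define EXAMPLE_LOCK_BIT        0

static void example_bit_lock(void)
{
        while (test_and_set_bit_lock(EXAMPLE_LOCK_BIT, example_lock_word))
                cpu_relax();                    /* bit was already set: spin */
        /* ... critical section ... */
        clear_bit_unlock(EXAMPLE_LOCK_BIT, example_lock_word);
}
#endif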
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr));
        return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}
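
/*
 * Sketch of the usual "consume a pending flag" idiom: because
 * test_and_clear_bit() is an atomic read-modify-write, exactly one of any
 * number of racing callers observes the bit as set.  Names are hypothetical.
 */
#if 0
static unsigned long example_pending[1];        /* hypothetical */
#define EXAMPLE_WORK_PENDING    0

static void example_run_work(void)
{
        /* only the caller that actually cleared the bit does the work */
        if (test_and_clear_bit(EXAMPLE_WORK_PENDING, example_pending))
                example_process_work();         /* hypothetical worker */
}
#endif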
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr));
        return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (*(volatile long *)addr),"dIr" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
        (__builtin_constant_p(nr) ? \
         constant_test_bit((nr),(addr)) : \
         variable_test_bit((nr),(addr)))
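
/*
 * Sketch of how the test_bit() dispatch above behaves: a compile-time
 * constant bit number goes through constant_test_bit(), a runtime value
 * through the btl-based variable_test_bit().  The bitmap is hypothetical.
 */
#if 0
static unsigned long example_map[2];            /* hypothetical, 128 bits */

static int example_query(int runtime_nr)
{
        if (test_bit(5, example_map))           /* constant nr: pure C path */
                return 1;
        return test_bit(runtime_nr, example_map); /* variable nr: btl path */
}
#endif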
#undef ADDR

extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
extern long find_first_bit(const unsigned long *addr, unsigned long size);
extern long find_next_bit(const unsigned long *addr, long size, long offset);
/* return index of first bit set in val or max when no bit is set */
static inline long __scanbit(unsigned long val, unsigned long max)
{
        asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
        return val;
}
#define find_first_bit(addr,size) \
        ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
          (__scanbit(*(unsigned long *)addr,(size))) : \
          find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
        ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
          ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
          find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
        ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
          (__scanbit(~*(unsigned long *)addr,(size))) : \
          find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
        ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
          ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
          find_next_zero_bit(addr,size,off)))
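
/*
 * Sketch of iterating over set bits with the find_*_bit() helpers above.
 * A constant size of at most BITS_PER_LONG would take the inline __scanbit()
 * path; the 128-bit bitmap here is larger, so the out-of-line functions are
 * called instead.  The bitmap and handler names are hypothetical.
 */
#if 0
static unsigned long example_mask[2];           /* hypothetical, 128 bits */

static void example_walk_bits(void)
{
        unsigned long bit;

        for (bit = find_first_bit(example_mask, 128);
             bit < 128;
             bit = find_next_bit(example_mask, 128, bit + 1))
                example_handle(bit);            /* hypothetical handler */
}
#endif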
/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
                                  int len)
{
        unsigned long end = i + len;
        while (i < end) {
                __set_bit(i, bitmap);
                i++;
        }
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
                                      int len)
{
        unsigned long end = i + len;
        while (i < end) {
                __clear_bit(i, bitmap);
                i++;
        }
}
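
/*
 * Sketch of the kind of allocation pattern these helpers support: find a run
 * of free bits with find_next_zero_string() and mark it busy with
 * set_bit_string().  The bitmap, its size and the -1 "not found" check come
 * from the comment above; all names here are hypothetical.
 */
#if 0
static unsigned long example_alloc_map[4];      /* hypothetical, 256 bits */

static long example_alloc_run(int npages)
{
        long start;

        start = find_next_zero_string(example_alloc_map, 0, 256, npages);
        if (start == -1)
                return -1;                      /* no free run of npages bits */
        set_bit_string(example_alloc_map, start, npages);
        return start;
}
#endif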
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        __asm__("bsfq %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfq %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
/*
 * __fls: find last bit set.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        __asm__("bsrq %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
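
/*
 * Worked example for the 0-based helpers above (illustrative only): for
 * word = 0x18 (binary 11000), __ffs(word) == 3, __fls(word) == 4 and
 * ffz(word) == 0.  Callers must rule out the all-zero / all-one cases first,
 * for instance when deriving an integer log2 as in this hypothetical helper.
 */
#if 0
static inline unsigned long example_ilog2(unsigned long n)
{
        /* only valid for n != 0 */
        return __fls(n);                        /* floor(log2(n)) */
}
#endif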
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
        int r;

        __asm__("bsfl %1,%0\n\t"
                "cmovzl %2,%0"
                : "=r" (r) : "rm" (x), "r" (-1));
        return r+1;
}
/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static inline int fls64(__u64 x)
{
        if (x == 0)
                return 0;
        return __fls(x) + 1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static inline int fls(int x)
{
        int r;

        __asm__("bsrl %1,%0\n\t"
                "cmovzl %2,%0"
                : "=&r" (r) : "rm" (x), "rm" (-1));
        return r+1;
}
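
/*
 * Worked example for the 1-based helpers (illustrative only): ffs() and
 * fls() return 0 for a zero argument, otherwise the 1-based position of the
 * lowest / highest set bit, so ffs(0x10) == 5, fls(0x10) == 5 and
 * fls64(1ULL << 40) == 41.
 */
#if 0
static void example_check_ffs_fls(void)
{
        BUG_ON(ffs(0) != 0 || fls(0) != 0);
        BUG_ON(ffs(0x10) != 5);
        BUG_ON(fls(0x10) != 5);
        BUG_ON(fls64(1ULL << 40) != 41);
}
#endif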
#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
                test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
                test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */