#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/alternative.h>

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}
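
/*
 * Usage sketch (illustrative only, hence kept under #if 0): atomically
 * setting a flag bit in a shared word.  The names my_flags and
 * MY_FLAG_BUSY are hypothetical.
 */
#if 0
static unsigned long my_flags;
#define MY_FLAG_BUSY	0

static void mark_busy(void)
{
	/* LOCK btsl: safe against concurrent bitops on the same word */
	set_bit(MY_FLAG_BUSY, &my_flags);
}
#endif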

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
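
/*
 * Usage sketch (illustrative only, under #if 0): clear_bit() itself is not
 * a barrier, so a completion-style pattern adds smp_mb__before_clear_bit()
 * to make earlier stores visible before the bit goes to zero.  The name
 * my_pending is hypothetical.
 */
#if 0
static unsigned long my_pending;

static void finish_work(void)
{
	/* ... publish results to shared memory ... */
	smp_mb__before_clear_bit();
	clear_bit(0, &my_pending);	/* other CPUs may now see the work as done */
}
#endif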

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
#define test_and_set_bit_lock test_and_set_bit
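
/*
 * Usage sketch (illustrative only, under #if 0): a minimal bit spinlock
 * built from test_and_set_bit_lock()/clear_bit_unlock().  The names
 * my_lock_word and MY_LOCK_BIT are hypothetical; real code would normally
 * use the helpers in <linux/bit_spinlock.h>.
 */
#if 0
#define MY_LOCK_BIT	0
static unsigned long my_lock_word;

static void my_bit_lock(void)
{
	/* spin until the old value of the bit was 0 */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();
}

static void my_bit_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}
#endif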

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (*(volatile long *)addr),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
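
/*
 * Usage sketch (illustrative only, under #if 0): test_bit() picks the C
 * implementation when @nr is a compile-time constant and the btl-based
 * variable_test_bit() otherwise.  The name my_map is hypothetical.
 */
#if 0
static unsigned long my_map[2];

static int query(int n)
{
	int a = test_bit(5, my_map);	/* constant nr -> constant_test_bit() */
	int b = test_bit(n, my_map);	/* variable nr -> variable_test_bit() */
	return a + b;
}
#endif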

#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);

/* return index of first bit set in val or max when no bit is set */
static inline long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}

#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
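
/*
 * Usage sketch (illustrative only, under #if 0): with a constant @size no
 * larger than BITS_PER_LONG the macros above reduce to a single __scanbit()
 * on one word; any other size falls through to the out-of-line find_*_bit()
 * functions.  The name my_map is hypothetical.
 */
#if 0
static unsigned long my_map[4];

static long scan_map(void)
{
	/* constant single-word size: inlined as one bsfq/cmovz */
	long used = find_first_bit(my_map, BITS_PER_LONG);
	/* larger bitmap: calls the extern find_first_zero_bit() */
	long free = find_first_zero_bit(my_map, 4 * BITS_PER_LONG);
	return used + free;
}
#endif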

/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
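
/*
 * Usage sketch (illustrative only, under #if 0): reserving a contiguous run
 * of bits, in the style of an IOMMU-area allocator.  find_next_zero_string()
 * returns the start of a free run or -1.  The name my_alloc_map and the map
 * size are hypothetical.
 */
#if 0
static unsigned long my_alloc_map[8];

static long reserve_run(int len)
{
	long start = find_next_zero_string(my_alloc_map, 0,
					   8 * BITS_PER_LONG, len);
	if (start != -1)
		set_bit_string(my_alloc_map, start, len);	/* mark run as used */
	return start;
}
#endif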

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
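
/*
 * Usage sketch (illustrative only, under #if 0): ffz() and __ffs() return a
 * raw bit index, so callers must rule out the all-ones/all-zeroes inputs for
 * which the result is undefined.
 */
#if 0
static unsigned long first_free_slot(unsigned long busy_mask)
{
	if (busy_mask == ~0UL)
		return ~0UL;		/* no zero bit; ffz() would be undefined */
	return ffz(busy_mask);		/* e.g. ffz(0x7UL) == 3 */
}
#endif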

/*
 * __fls: find last bit set.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}

/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
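
/*
 * Usage sketch (illustrative only, under #if 0): unlike __ffs()/__fls(),
 * the ffs()/fls()/fls64() family is 1-based and returns 0 for a zero
 * argument, matching the libc convention.
 */
#if 0
static void example_results(void)
{
	BUG_ON(ffs(0x18) != 4);		/* lowest set bit is bit 3 -> 4 */
	BUG_ON(fls(0x18) != 5);		/* highest set bit is bit 4 -> 5 */
	BUG_ON(ffs(0) != 0);
	BUG_ON(fls64(0) != 0);
}
#endif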

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
		test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
		test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */