#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/alternative.h>

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}
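
/*
 * Usage sketch (not part of the original header; kept out of the build
 * like the test_bit() kernel-doc stub below): because @nr may index past
 * the first long, set_bit() works on multi-word bitmaps.  The bitmap and
 * bit numbers here are hypothetical.
 */
#if 0
static unsigned long example_map[4];		/* 4 x 64 = 256 bits */

static void example_mark(void)
{
	set_bit(3, example_map);	/* bit 3 of word 0 */
	set_bit(130, example_map);	/* bit 2 of word 2 */
}
#endif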

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
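
/*
 * Usage sketch (not part of the original header; not compiled): releasing
 * a hypothetical lock bit the way the clear_bit() comment above suggests,
 * with smp_mb__before_clear_bit() ordering earlier stores before the
 * release.  The lock word and bit number are made up for illustration.
 */
#if 0
static void example_unlock(volatile unsigned long *lock_word)
{
	smp_mb__before_clear_bit();	/* make prior writes visible first */
	clear_bit(0, lock_word);	/* atomically drop the lock bit */
}
#endif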

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
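
/*
 * Usage sketch (not part of the original header; not compiled): a
 * hypothetical try-lock built on test_and_set_bit(), relying on its
 * implied memory barrier.  The old value tells us whether we won the race.
 */
#if 0
static int example_trylock(volatile unsigned long *lock_word)
{
	/* returns 1 if the bit was previously clear, i.e. we acquired it */
	return test_and_set_bit(0, lock_word) == 0;
}
#endif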

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (*(volatile long *)addr),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
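
/*
 * Usage sketch (not part of the original header; not compiled): test_bit()
 * picks constant_test_bit() when gcc can prove @nr constant and the
 * btl-based variable_test_bit() otherwise.  The bitmap and values are
 * hypothetical.
 */
#if 0
static int example_test(unsigned long *map, int nr)
{
	int a = test_bit(42, map);	/* constant nr -> constant_test_bit() */
	int b = test_bit(nr, map);	/* variable nr -> variable_test_bit() */

	return a | b;
}
#endif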

#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit(const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);

/* return index of first bit set in val or max when no bit is set */
static inline long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}

#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
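
/*
 * Usage sketch (not part of the original header; not compiled): with a
 * compile-time size of at most BITS_PER_LONG the find_* macros reduce to
 * a single inline __scanbit(); otherwise they fall through to the
 * out-of-line library functions declared above.  The bitmaps here are
 * hypothetical.
 */
#if 0
static long example_scan(unsigned long *small, unsigned long *large)
{
	long first = find_first_bit(small, 64);		/* inline __scanbit() path */

	return find_next_bit(large, 1024, first + 1);	/* out-of-line call */
}
#endif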

/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
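
/*
 * Usage sketch (not part of the original header; not compiled): reserving
 * and releasing a run of bits with the non-atomic string helpers.  The
 * caller is assumed to hold whatever lock protects the bitmap, since the
 * helpers use __set_bit()/__clear_bit().
 */
#if 0
static void example_range(unsigned long *map)
{
	set_bit_string(map, 8, 4);	/* mark bits 8..11 as used */
	__clear_bit_string(map, 8, 4);	/* release them again */
}
#endif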

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

/*
 * __fls - find last set bit in word.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
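
/*
 * Usage sketch (not part of the original header; not compiled): the callers
 * below guard against the undefined inputs spelled out in the comments
 * above (~0UL for ffz(), 0 for __ffs()/__fls()).
 */
#if 0
static long example_first_free(unsigned long word)
{
	if (word == ~0UL)
		return -1;		/* no zero bit to find */
	return ffz(word);		/* e.g. ffz(0x7UL) == 3 */
}

static long example_highest(unsigned long word)
{
	if (word == 0)
		return -1;		/* no set bit to find */
	return __fls(word);		/* e.g. __fls(0x90UL) == 7 */
}
#endif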

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}

/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
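
/*
 * Usage sketch (not part of the original header; not compiled): unlike
 * __ffs()/__fls() above, ffs()/fls()/fls64() are 1-based and return 0 for
 * a zero argument, matching the libc ffs() convention.
 */
#if 0
static int example_ffs_fls(void)
{
	int lo = ffs(0x10);		/* == 5: lowest set bit is bit 4 */
	int hi = fls(0x10);		/* == 5: highest set bit is bit 4 */
	int wide = fls64(1ULL << 40);	/* == 41 */

	return lo + hi + wide + ffs(0);	/* ffs(0) == 0: no bit set */
}
#endif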

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
		test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
		test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */