#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
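
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller publishing a flag bit in a shared word. The names my_flags and
 * MY_READY_BIT are made up for the example.
 */
#if 0 /* usage sketch only */
#define MY_READY_BIT	0
static unsigned long my_flags;

static void mark_ready(void)
{
	set_bit(MY_READY_BIT, &my_flags);	/* atomic read-modify-write */
}
#endif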
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
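
/*
 * Illustrative sketch (assumed usage, not from the original header):
 * atomically flipping a state bit shared between contexts. The name
 * led_state is hypothetical.
 */
#if 0 /* usage sketch only */
static unsigned long led_state;

static void toggle_led0(void)
{
	change_bit(0, &led_state);	/* atomically invert bit 0 */
}
#endif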
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
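
/*
 * Illustrative sketch (assumed usage): a minimal test-and-set style lock
 * built on these primitives, in the spirit of the smp_mb__before_clear_bit()
 * comment above. The names example_lock_word and EXAMPLE_LOCK_BIT are
 * hypothetical; cpu_relax() is the usual kernel spin-wait hint.
 */
#if 0 /* usage sketch only */
#define EXAMPLE_LOCK_BIT	0
static unsigned long example_lock_word;

static void example_lock(void)
{
	while (test_and_set_bit(EXAMPLE_LOCK_BIT, &example_lock_word))
		cpu_relax();			/* spin until the bit was 0 */
}

static void example_unlock(void)
{
	smp_mb__before_clear_bit();		/* order prior stores before release */
	clear_bit(EXAMPLE_LOCK_BIT, &example_lock_word);
}
#endif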
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

#undef ADDR
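
/*
 * Illustrative sketch (assumed usage): with a constant @nr the test_bit()
 * macro compiles down to constant_test_bit(); with a runtime value it
 * emits the btl-based variable_test_bit(). The bitmap below is hypothetical.
 */
#if 0 /* usage sketch only */
static unsigned long example_bitmap[2];		/* 64 bits */

static int example_probe(int n)
{
	if (test_bit(5, example_bitmap))	/* constant nr: pure C path */
		return 1;
	return test_bit(n, example_bitmap);	/* variable nr: btl path */
}
#endif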
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
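
/*
 * Illustrative sketch (assumed usage): claiming the lowest free slot from a
 * hypothetical bitmap, a common pattern for this routine. EXAMPLE_SLOTS and
 * slot_map are made up for the example.
 */
#if 0 /* usage sketch only */
#define EXAMPLE_SLOTS	64
static unsigned long slot_map[EXAMPLE_SLOTS / 32];

static int grab_slot(void)
{
	int slot = find_first_zero_bit(slot_map, EXAMPLE_SLOTS);

	if (slot >= EXAMPLE_SLOTS)
		return -1;		/* all slots busy */
	set_bit(slot, slot_map);	/* note: check-then-set is racy without a lock */
	return slot;
}
#endif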
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
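
/*
 * Illustrative values (assumed example, not from the original header):
 * __ffs() is zero-based, so __ffs(0x1) == 0 and __ffs(0x8) == 3. The
 * result is undefined for a zero word, hence the guard below.
 */
#if 0 /* usage sketch only */
static unsigned long lowest_set(unsigned long word)
{
	return word ? __ffs(word) : ~0UL;	/* caller treats ~0UL as "none" */
}
#endif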
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);
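
/*
 * Illustrative sketch (assumed usage): the usual idiom for visiting every
 * set bit of a bitmap with find_first_bit()/find_next_bit(). The function
 * name is hypothetical.
 */
#if 0 /* usage sketch only */
static void example_walk(const unsigned long *map, int bits)
{
	int bit;

	for (bit = find_first_bit(map, bits);
	     bit < bits;
	     bit = find_next_bit(map, bits, bit + 1))
		/* visit(bit); */ ;
}
#endif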
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
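
/*
 * Illustrative values (assumed example): ffz() is a bit scan of the
 * complemented word, so ffz(0x0) == 0, ffz(0x1) == 1, ffz(0xff) == 8,
 * and ffz(~0UL) is undefined, as the comment above warns.
 */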
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
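
/*
 * Illustrative values (assumed example): both routines are one-based and
 * return 0 for a zero argument, matching the libc convention. Thus
 * ffs(0x10) == 5 and fls(0x10) == 5, while for 0x90 the low and high set
 * bits differ: ffs(0x90) == 5 and fls(0x90) == 8.
 */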
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
		test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr, addr) \
		test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */