bitops.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1: " __LL "%0, %1 # set_bit \n"
                " or %0, %2 \n"
                " "__SC "%0, %1 \n"
                " beqzl %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "1: " __LL "%0, %1 # set_bit \n"
                " or %0, %2 \n"
                " "__SC "%0, %1 \n"
                " beqz %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a |= mask;
                __bi_local_irq_restore(flags);
        }
}
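
/*
 * Usage sketch (illustrative only, not part of the original header): setting
 * a "work pending" flag that another CPU may be updating at the same time.
 * The PENDING bit number and the pending_flags word are hypothetical names.
 *
 *	static unsigned long pending_flags;
 *
 *	set_bit(PENDING, &pending_flags);	atomic read-modify-write
 *
 * On CPUs without ll/sc the fallback path above only disables local
 * interrupts, i.e. it assumes no second CPU touches the word concurrently.
 */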

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1: " __LL "%0, %1 # clear_bit \n"
                " and %0, %2 \n"
                " " __SC "%0, %1 \n"
                " beqzl %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "1: " __LL "%0, %1 # clear_bit \n"
                " and %0, %2 \n"
                " " __SC "%0, %1 \n"
                " beqz %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a &= ~mask;
                __bi_local_irq_restore(flags);
        }
}
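
/*
 * Usage sketch (illustrative, not from the original source): because
 * clear_bit() carries no implicit barrier, unlock-style users pair it with
 * smp_mb__before_clear_bit() so earlier stores become visible before the
 * bit drops. LOCK and word are hypothetical names.
 *
 *	... update the data protected by the bit ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK, &word);
 */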

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "1: " __LL "%0, %1 # change_bit \n"
                " xor %0, %2 \n"
                " "__SC "%0, %1 \n"
                " beqzl %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "1: " __LL "%0, %1 # change_bit \n"
                " xor %0, %2 \n"
                " "__SC "%0, %1 \n"
                " beqz %0, 1b \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a ^= mask;
                __bi_local_irq_restore(flags);
        }
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "1: " __LL "%0, %1 # test_and_set_bit \n"
                " or %2, %0, %3 \n"
                " " __SC "%2, %1 \n"
                " beqzl %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                "sync \n"
#endif
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                " .set noreorder # test_and_set_bit \n"
                "1: " __LL "%0, %1 \n"
                " or %2, %0, %3 \n"
                " " __SC "%2, %1 \n"
                " beqz %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                "sync \n"
#endif
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a |= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}
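
/*
 * Usage sketch (illustrative, not part of the original file): a try-lock
 * built on test_and_set_bit(), which returns the old bit value and implies a
 * barrier. The IN_USE bit number and state word are hypothetical.
 *
 *	if (test_and_set_bit(IN_USE, &state))
 *		return -EBUSY;		bit was already set, lost the race
 *	... exclusive work ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(IN_USE, &state);	release
 */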

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += nr >> SZLONG_LOG;
        mask = 1UL << (nr & SZLONG_MASK);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "1: " __LL "%0, %1 # test_and_clear_bit \n"
                " or %2, %0, %3 \n"
                " xor %2, %3 \n"
                __SC "%2, %1 \n"
                " beqzl %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                " sync \n"
#endif
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                " .set noreorder # test_and_clear_bit \n"
                "1: " __LL "%0, %1 \n"
                " or %2, %0, %3 \n"
                " xor %2, %3 \n"
                __SC "%2, %1 \n"
                " beqz %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                " sync \n"
#endif
                " .set reorder \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a &= ~mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}
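
/*
 * Usage sketch (illustrative, not from the original source): consuming a
 * pending-work flag exactly once even when several CPUs poll it; only the
 * caller that observed the 1 -> 0 transition does the work. WORK_PENDING,
 * flags and do_work() are hypothetical.
 *
 *	if (test_and_clear_bit(WORK_PENDING, &flags))
 *		do_work();
 */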

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
        volatile unsigned long * addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "1: " __LL " %0, %1 # test_and_change_bit \n"
                " xor %2, %0, %3 \n"
                " "__SC "%2, %1 \n"
                " beqzl %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                " sync \n"
#endif
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                " .set noreorder # test_and_change_bit \n"
                "1: " __LL " %0, %1 \n"
                " xor %2, %0, %3 \n"
                " "__SC "\t%2, %1 \n"
                " beqz %2, 1b \n"
                " and %2, %0, %3 \n"
#ifdef CONFIG_SMP
                " sync \n"
#endif
                " .set reorder \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask, retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a ^= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
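
/*
 * Worked example (not from the original source): with 32-bit longs,
 * test_bit(37, bitmap) reads bit 5 of bitmap[1], because 37 >> SZLONG_LOG
 * selects word 1 and 37 & SZLONG_MASK selects bit 5 within that word.
 */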

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        int b = 0, s;

        word = ~word;
#ifdef CONFIG_32BIT
        s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_64BIT
        s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
        s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 63 != 0) s = 0; b += s;
#endif

        return b;
}
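
/*
 * Worked example (not from the original source): on a 32-bit kernel,
 * ffz(0x0000ffff) is 16. After word = ~word only bits 16..31 remain set, so
 * the first shift-by-16 test finds the low half empty and accumulates b = 16,
 * and every later test adds 0. ffz(~0UL) is undefined, hence the rule above.
 */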

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        return ffz(~word);
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset);
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)		/* Are any bits zero? */
                return result + size;	/* Nope. */
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
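
/*
 * Usage sketch (illustrative, not part of the original header): grabbing the
 * first free slot in a fixed-size table, retrying if another CPU wins the
 * race between the scan and the atomic claim. MAX_SLOTS and slot_map are
 * hypothetical names.
 *
 *	unsigned long slot;
 *
 *	do {
 *		slot = find_first_zero_bit(slot_map, MAX_SLOTS);
 *		if (slot >= MAX_SLOTS)
 *			return -ENOSPC;
 *	} while (test_and_set_bit(slot, slot_map));
 */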

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp &= ~0UL << offset;
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (_MIPS_SZLONG - size);
        if (tmp == 0UL)			/* Are any bits set? */
                return result + size;	/* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
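
/*
 * Usage sketch (illustrative, not from the original source): visiting every
 * set bit in a bitmap by chaining find_first_bit() and find_next_bit().
 * NBITS, map, i and handle_bit() are hypothetical.
 *
 *	for (i = find_first_bit(map, NBITS); i < NBITS;
 *	     i = find_next_bit(map, NBITS, i + 1))
 *		handle_bit(i);
 */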

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
#endif
}
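
/*
 * Worked example (not part of the original file): the 140-bit bitmap spans
 * five 32-bit words b[0]..b[4] or three 64-bit words b[0]..b[2]. If only
 * bit 100 is set, then on a 32-bit kernel that is bit 100 & 31 = 4 of b[3],
 * and the b[3] branch above returns __ffs(b[3]) + 96 = 100.
 */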

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
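
/*
 * Worked example (not from the original source): hweight32(0xf0f0) is 8 and
 * hweight8(0x5a) is 4. A typical use is counting how many bits are marked in
 * a 32-bit mask, e.g. hweight32(cpu_mask) with cpu_mask a hypothetical word.
 */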

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;

        return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;

        return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
{
        const unsigned char *ADDR = (const unsigned char *) addr;
        int mask;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = cpu_to_lelongp(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = cpu_to_lelongp(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = cpu_to_lelongp(p);

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)		/* Are any bits zero? */
                return result + size;	/* Nope. */
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
        __test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
        __test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
        int ret;					\
        spin_lock(lock);				\
        ret = ext2_set_bit((nr), (addr));		\
        spin_unlock(lock);				\
        ret;						\
})
#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
        int ret;					\
        spin_lock(lock);				\
        ret = ext2_clear_bit((nr), (addr));		\
        spin_unlock(lock);				\
        ret;						\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
        find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
        find_next_zero_le_bit((unsigned long*)addr, size, off)
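
/*
 * Usage sketch (illustrative, not part of the original header): ext2 on-disk
 * bitmaps are little-endian, so a bitmap buffer is scanned and updated with
 * these wrappers regardless of host byte order. bitmap, nbits and
 * bitmap_lock are hypothetical names.
 *
 *	bit = ext2_find_first_zero_bit(bitmap, nbits);
 *	if (bit < nbits && !ext2_set_bit_atomic(&bitmap_lock, bit, bitmap))
 *		... bit is now allocated; its old value was 0 ...
 */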

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */