/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
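
/*
 * Example (illustrative sketch; the bitmap name and bit index are
 * hypothetical, and DECLARE_BITMAP() from <linux/types.h> is assumed):
 *
 *	static DECLARE_BITMAP(present, 32);
 *
 *	set_bit(3, present);
 *
 * set_bit() atomically ORs bit 3 into present[0], so concurrent callers
 * may set other bits of the same word without losing updates.
 */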

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
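
/*
 * Example (sketch; "state", the bit number and do_one_time_init() are
 * hypothetical): test_and_set_bit() is commonly used to claim a flag
 * exactly once, since only the caller that saw the bit clear gets a
 * zero return value.
 *
 *	static unsigned long state;
 *
 *	if (!test_and_set_bit(0, &state))
 *		do_one_time_init();
 */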

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
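
/*
 * Example (sketch; PENDING_BIT, dev_flags and handle_pending_event()
 * are hypothetical names): a handler can atomically consume a "pending"
 * flag so the event is processed by exactly one path, even if it races
 * with the setter.
 *
 *	if (test_and_clear_bit(PENDING_BIT, &dev_flags))
 *		handle_pending_event();
 */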

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
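
/*
 * Example (sketch; the bitmap name is hypothetical and DECLARE_BITMAP()
 * is assumed): test_bit() indexes bits across the whole array, so bit 65
 * lives in word 65 >> SZLONG_LOG at position 65 & SZLONG_MASK
 * (word 1, bit 1 on a 64-bit kernel; word 2, bit 1 on a 32-bit kernel).
 *
 *	static DECLARE_BITMAP(map, 128);
 *
 *	int set = test_bit(65, map);
 */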

/*
 * Return the bit position (0..SZLONG-1) of the most significant 1 bit
 * in a word. Returns -1 if no 1 bit exists.
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		" .set push \n"
		" .set mips32 \n"
		" clz %0, %1 \n"
		" .set pop \n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	" .set push \n"
	" .set mips64 \n"
	" dclz %0, %1 \n"
	" .set pop \n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	return __ilog2(word & -word);
#else
	int b = 0, s;

#ifdef CONFIG_32BIT
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
#endif
#ifdef CONFIG_64BIT
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;

	return b;
#endif
#endif
}
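
/*
 * Worked examples for __ffs(): word & -word isolates the lowest set bit
 * (two's complement), so the CONFIG_CPU_MIPS32/MIPS64 path reduces to a
 * single clz/dclz via __ilog2(). For instance:
 *
 *	__ffs(0x1)    == 0
 *	__ffs(0x8)    == 3
 *	__ffs(0xff00) == 8
 */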

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}

/*
 * flz - find last zero in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long flz(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	return __ilog2(~word);
#else
#ifdef CONFIG_32BIT
	int r = 31, s;

	word = ~word;
	s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x80000000)) s = 0; r -= s;

	return r;
#endif
#ifdef CONFIG_64BIT
	int r = 63, s;

	word = ~word;
	s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
	s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;

	return r;
#endif
#endif
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
	if (word == 0)
		return 0;

	return flz(~word) + 1;
}
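
/*
 * Worked examples for the word-wide helpers above:
 *
 *	ffs(0)    == 0		(no bit set)
 *	ffs(0x8)  == 4		(lowest set bit is bit 3, plus one)
 *	fls(0x20) == 6		(highest set bit is bit 5, plus one)
 *	ffz(0x07) == 3		(bits 0..2 are set, first zero is bit 3)
 */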

#define fls64(x) generic_fls64(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
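
/*
 * Example (sketch; "slots" and use_slot() are hypothetical, and
 * DECLARE_BITMAP() is assumed): grabbing the first free slot of a
 * fixed-size table. A return value equal to the size means no zero bit
 * was found; the test_and_set_bit() check resolves a race to claim it.
 *
 *	static DECLARE_BITMAP(slots, 64);
 *
 *	unsigned long slot = find_next_zero_bit(slots, 64, 0);
 *	if (slot < 64 && !test_and_set_bit(slot, slots))
 *		use_slot(slot);
 */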

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
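
/*
 * Example (sketch; "map" and process_bit() are hypothetical): walking
 * every set bit of a 128-bit bitmap with find_first_bit()/find_next_bit().
 *
 *	unsigned long bit;
 *
 *	for (bit = find_first_bit(map, 128);
 *	     bit < 128;
 *	     bit = find_next_bit(map, 128, bit + 1))
 *		process_bit(bit);
 */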

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;

	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;

	return __ffs(b[2]) + 128;
#endif
}
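
/*
 * Layout note with a hypothetical use: the 140-bit priority bitmap spans
 * five 32-bit words (b[0]..b[4]) on a 32-bit kernel and three 64-bit
 * words (b[0]..b[2]) on a 64-bit kernel, which is why the chain above
 * checks each word in turn. The caller must ensure some bit is set, or
 * the final __ffs() is undefined.
 *
 *	unsigned long prio_bitmap[BITS_TO_LONGS(140)];
 *
 *	int prio = sched_find_first_bit(prio_bitmap);
 *
 * prio_bitmap is illustrative; BITS_TO_LONGS() from the generic kernel
 * headers is assumed.
 */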

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bit order.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */