/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>	/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel-mode code; user-mode code that dares
 * to include kernel headers should still build and run.
 */
#define __bi_flags		unsigned long flags
#define __bi_cli()		__cli()
#define __bi_save_flags(x)	__save_flags(x)
#define __bi_save_and_cli(x)	__save_and_cli(x)
#define __bi_restore_flags(x)	__restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe, and
 * interrupt friendly: they do not need to disable interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# set_bit\n\t"
                "or\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
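
/*
 * Usage sketch (editor's addition, not part of the original header; the
 * bitmap and bit numbers are made up). set_bit() treats @addr as an array
 * of 32-bit words, so bit 37 lands in word 1, bit 5.
 */
#if 0
static unsigned long example_map[2];            /* zero-initialised 64-bit bitmap */

static __inline__ void example_mark_pending(void)
{
        set_bit(0, example_map);                /* example_map[0] |= 0x00000001, atomically */
        set_bit(37, example_map);               /* example_map[1] |= 0x00000020, atomically */
}
#endif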

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# clear_bit\n\t"
                "and\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b\n\t"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
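
/*
 * Sketch of the locking idiom referred to above (editor's addition; the
 * names are hypothetical). When a bit acts as a lock flag, the barrier
 * macro orders the critical-section stores before the releasing clear_bit().
 */
#if 0
static unsigned long example_lock_word;
static int example_shared_counter;

static __inline__ void example_unlock(void)
{
        example_shared_counter++;               /* work done while "holding" bit 0 */
        smp_mb__before_clear_bit();             /* make the store visible before the release */
        clear_bit(0, &example_lock_word);       /* atomic clear, but no implicit barrier */
}
#endif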

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# change_bit\n\t"
                "xor\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_set_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}
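
/*
 * Usage sketch (editor's addition; names are hypothetical). Because the old
 * value is returned atomically, test_and_set_bit() can arbitrate one-time
 * work between racing CPUs: exactly one caller sees 0.
 */
#if 0
static unsigned long example_init_done;

static __inline__ void example_init_once(void)
{
        if (!test_and_set_bit(0, &example_init_done)) {
                /* Old value was 0: this caller won the race and does the init. */
        }
}
#endif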

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_clear_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "xor\t%2, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_change_bit\n"
                "1:\tll\t%0, %1\n\t"
                "xor\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}
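
/*
 * Sketch contrasting the atomic and double-underscore variants (editor's
 * addition; names are hypothetical). The non-atomic forms are only safe
 * when something else already serialises access to the bitmap, e.g. a lock
 * held by the caller or data private to a single context.
 */
#if 0
static unsigned long example_private_map;       /* touched by one context only */

static __inline__ int example_note_seen(int nr)
{
        return __test_and_set_bit(nr, &example_private_map);    /* no atomicity needed */
}
#endif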

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a |= mask;
        __bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a &= ~mask;
        __bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a ^= mask;
        __bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
        unsigned long dummy;
        int res;

        if (!size)
                return 0;

        __asm__ (".set\tnoreorder\n\t"
                ".set\tnoat\n"
                "1:\tsubu\t$1,%6,%0\n\t"
                "blez\t$1,2f\n\t"
                "lw\t$1,(%5)\n\t"
                "addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
                "beql\t%1,$1,1b\n\t"
                "addiu\t%0,32\n\t"
#else
                "addiu\t%0,32\n\t"
                "beq\t%1,$1,1b\n\t"
                "nop\n\t"
                "subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
                "li\t%1,1\n"
                "1:\tand\t%2,$1,%1\n\t"
                "beqz\t%2,2f\n\t"
                "sll\t%1,%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "add\t%0,%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:"
                : "=r" (res), "=r" (dummy), "=r" (addr)
                : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
                  "2" (addr), "r" (size)
                : "$1");

        return res;
}
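
/*
 * Worked example (editor's addition): if the first 32-bit word of a
 * little-endian bitmap is 0x0000ffff, bits 0..15 are set and
 * find_first_zero_bit() returns 16. If no zero bit exists in the range the
 * return value is not a valid bit number, so callers conventionally compare
 * the result against the size they passed in, as in this sketch with
 * hypothetical names.
 */
#if 0
static __inline__ int example_first_free(unsigned long *map, int bits)
{
        int bit = find_first_zero_bit(map, bits);

        return (bit < bits) ? bit : -1;         /* -1: nothing free in range */
}
#endif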

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        int set = 0, bit = offset & 31, res;
        unsigned long dummy;

        if (bit) {
                /*
                 * Look for zero in first byte
                 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
                __asm__(".set\tnoreorder\n\t"
                        ".set\tnoat\n"
                        "1:\tand\t$1,%4,%1\n\t"
                        "beqz\t$1,1f\n\t"
                        "sll\t%1,%1,1\n\t"
                        "bnez\t%1,1b\n\t"
                        "addiu\t%0,1\n\t"
                        ".set\tat\n\t"
                        ".set\treorder\n"
                        "1:"
                        : "=r" (set), "=r" (dummy)
                        : "0" (0), "1" (1 << bit), "r" (*p)
                        : "$1");
                if (set < (32 - bit))
                        return set + offset;
                set = 32 - bit;
                p++;
        }

        /*
         * No zero yet, search remaining full bytes for a zero
         */
        res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
        return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned int __res;
        unsigned int mask = 1;

        __asm__ (
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "move\t%0,$0\n"
                "1:\tand\t$1,%2,%1\n\t"
                "beqz\t$1,2f\n\t"
                "sll\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:\n\t"
                : "=&r" (__res), "=r" (mask)
                : "r" (word), "1" (mask)
                : "$1");

        return __res;
}
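
/*
 * Worked examples (editor's addition): ffz(0x0000ffff) == 16, since bits
 * 0..15 are set and bit 16 is the first zero; ffz(0) == 0. The result for
 * ~0UL is undefined, hence the advice above to test against ~0UL first, as
 * in this sketch with a hypothetical sentinel value.
 */
#if 0
static __inline__ unsigned long example_ffz_checked(unsigned long w)
{
        return (w == ~0UL) ? 32 : ffz(w);       /* 32 used here as a "no zero bit" sentinel */
}
#endif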

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

#endif /* __KERNEL__ */
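
/*
 * Worked examples (editor's addition): hweight32(0xf0f0f0f0) == 16,
 * hweight16(0x00ff) == 8 and hweight8(0x5a) == 4; each simply counts the
 * set bits of its argument via the generic helpers named above. The wrapper
 * below is hypothetical.
 */
#if 0
static __inline__ int example_count_set(unsigned long w)
{
        return hweight32(w);                    /* e.g. returns 16 for 0xf0f0f0f0 */
}
#endif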

#ifdef __MIPSEB__

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */
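
/*
 * Sketch of the usual scan idiom (editor's addition; names are
 * hypothetical). find_next_zero_bit() resumes the search at @offset, so a
 * whole bitmap can be walked by feeding each hit + 1 back in as the next
 * offset.
 */
#if 0
static __inline__ void example_scan_free(unsigned long *map, int bits)
{
        int bit = find_first_zero_bit(map, bits);

        while (bit < bits) {
                /* ... claim or record free slot "bit" ... */
                bit = find_next_zero_bit(map, bits, bit + 1);
        }
}
#endif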

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__

static __inline__ int ext2_set_bit(int nr, void * addr)
{
        int mask, retval, flags;
        unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        restore_flags(flags);

        return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int mask, retval, flags;
        unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        restore_flags(flags);

        return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
        int mask;
        const unsigned char *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift would operate on the wrong bits. So we could keep
                 * a big endian value in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift instead:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
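
/*
 * Byte-order note with a worked example (editor's addition): ext2 bitmaps
 * are little-endian on disk, so ext2 bit 0 is bit 0 of byte 0. If the four
 * bytes of a bitmap word are 0xff 0x00 0x00 0x00 on disk, the first free
 * ext2 bit is bit 8. A big-endian CPU loads that word as 0xff000000, which
 * is why the routine above swabs it to 0x000000ff before calling ffz(),
 * giving 8 as required; the little-endian build gets the same answer
 * without any swapping.
 */
#if 0
static __inline__ unsigned long example_first_free_ext2(void *bitmap, unsigned long nr_bits)
{
        return ext2_find_first_zero_bit(bitmap, nr_bits);       /* hypothetical wrapper */
}
#endif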

#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr)                  test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr)                test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr)                 test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size)    find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
        find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's usefulness for data exchange.
 */
#define minix_test_and_set_bit(nr,addr)         test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */