/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

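/*
 * Illustrative usage sketch, not part of the original header: because
 * clear_bit() implies no barrier, a caller releasing a bit used as a lock
 * is expected to add smp_mb__before_clear_bit() itself. The names
 * lock_word and MY_LOCK_BIT are hypothetical.
 */
#if 0	/* example only */
static unsigned long lock_word;
#define MY_LOCK_BIT 0

static void my_unlock(void)
{
        /* make the critical section's stores visible before dropping the bit */
        smp_mb__before_clear_bit();
        clear_bit(MY_LOCK_BIT, &lock_word);
}
#endif
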
/*
 * Only disable interrupts for kernel mode stuff, to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions, for MIPS ISA level 2 and above, are atomic with respect
 * to interrupts and SMP, and they never disable interrupts (interrupt
 * friendly).
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# set_bit\n\t"
                "or\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

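/*
 * Illustrative usage sketch, not part of the original header: @addr is
 * treated as an array of words, so @nr may index past the first word.
 * The bitmap my_bitmap is hypothetical.
 */
#if 0	/* example only */
static unsigned long my_bitmap[2];		/* 64 bits */

static void mark_bits(void)
{
        set_bit(0, my_bitmap);			/* bit 0 of word 0 */
        set_bit(35, my_bitmap);			/* bit 3 of word 1 */
}
#endif
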
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m |= 1UL << (nr & 31);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# clear_bit\n\t"
                "and\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b\n\t"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# change_bit\n\t"
                "xor\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_set_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

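/*
 * Illustrative usage sketch, not part of the original header: a common
 * pattern is to use test_and_set_bit() as a try-lock, relying on the
 * implied barrier to order the critical section after a successful set.
 * The names my_flags and MY_BUSY_BIT are hypothetical.
 */
#if 0	/* example only */
static unsigned long my_flags;
#define MY_BUSY_BIT 0

static int my_trylock(void)
{
        /* returns 0 on success, non-zero if the bit was already set */
        return test_and_set_bit(MY_BUSY_BIT, &my_flags);
}
#endif
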
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_clear_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "xor\t%2, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_change_bit\n"
                "1:\tll\t%0, %1\n\t"
                "xor\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a |= mask;
        __bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a &= ~mask;
        __bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a ^= mask;
        __bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}

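/*
 * Illustrative sketch, not part of the original header: test_bit() uses the
 * same word-based numbering as the operations above, e.g. bit 35 is bit 3
 * of the second 32-bit word.
 */
#if 0	/* example only */
static int bit35_is_set(unsigned int *w)
{
        /* both sides are 1 exactly when bit 3 of w[1] is set */
        return test_bit(35, w) == ((w[1] >> 3) & 1);
}
#endif
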
#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
        unsigned long dummy;
        int res;

        if (!size)
                return 0;

        __asm__ (".set\tnoreorder\n\t"
                ".set\tnoat\n"
                "1:\tsubu\t$1,%6,%0\n\t"
                "blez\t$1,2f\n\t"
                "lw\t$1,(%5)\n\t"
                "addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
                "beql\t%1,$1,1b\n\t"
                "addiu\t%0,32\n\t"
#else
                "addiu\t%0,32\n\t"
                "beq\t%1,$1,1b\n\t"
                "nop\n\t"
                "subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
                "li\t%1,1\n"
                "1:\tand\t%2,$1,%1\n\t"
                "beqz\t%2,2f\n\t"
                "sll\t%1,%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "add\t%0,%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:"
                : "=r" (res), "=r" (dummy), "=r" (addr)
                : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
                  "2" (addr), "r" (size)
                : "$1");

        return res;
}

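/*
 * Illustrative usage sketch, not part of the original header: allocating the
 * first free slot in a bitmap. NR_SLOTS and slot_map are hypothetical, and a
 * real caller would serialize this against concurrent allocators.
 */
#if 0	/* example only */
#define NR_SLOTS 128
static unsigned long slot_map[NR_SLOTS / 32];

static int alloc_slot(void)
{
        int slot = find_first_zero_bit(slot_map, NR_SLOTS);

        if (slot >= NR_SLOTS)
                return -1;			/* bitmap full */
        set_bit(slot, slot_map);
        return slot;
}
#endif
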
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        int set = 0, bit = offset & 31, res;
        unsigned long dummy;

        if (bit) {
                /*
                 * Look for a zero in the first word
                 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
                __asm__(".set\tnoreorder\n\t"
                        ".set\tnoat\n"
                        "1:\tand\t$1,%4,%1\n\t"
                        "beqz\t$1,1f\n\t"
                        "sll\t%1,%1,1\n\t"
                        "bnez\t%1,1b\n\t"
                        "addiu\t%0,1\n\t"
                        ".set\tat\n\t"
                        ".set\treorder\n"
                        "1:"
                        : "=r" (set), "=r" (dummy)
                        : "0" (0), "1" (1 << bit), "r" (*p)
                        : "$1");
                if (set < (32 - bit))
                        return set + offset;
                set = 32 - bit;
                p++;
        }

        /*
         * No zero yet, search the remaining full words for a zero
         */
        res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));

        return offset + set + res;
}

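/*
 * Illustrative usage sketch, not part of the original header: walking every
 * zero bit in a bitmap with find_next_zero_bit(); note this implementation's
 * (addr, size, offset) argument order. MAP_BITS, free_map and visit() are
 * hypothetical.
 */
#if 0	/* example only */
#define MAP_BITS 256
static unsigned long free_map[MAP_BITS / 32];

static void visit_free_bits(void (*visit)(int bit))
{
        int bit;

        for (bit = find_first_zero_bit(free_map, MAP_BITS);
             bit < MAP_BITS;
             bit = find_next_zero_bit(free_map, MAP_BITS, bit + 1))
                visit(bit);
}
#endif
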
#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned int __res;
        unsigned int mask = 1;

        __asm__ (
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "move\t%0,$0\n"
                "1:\tand\t$1,%2,%1\n\t"
                "beqz\t$1,2f\n\t"
                "sll\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:\n\t"
                : "=&r" (__res), "=r" (mask)
                : "r" (word), "1" (mask)
                : "$1");

        return __res;
}

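/*
 * Illustrative sketch, not part of the original header: ffz() numbers bits
 * from zero, so ffz(0x0000ffff) is 16, and the caller must rule out ~0UL
 * before calling it.
 */
#if 0	/* example only */
static unsigned long first_zero_or_32(unsigned long word)
{
        if (word == ~0UL)
                return 32;		/* ffz() is undefined for ~0UL */
        return ffz(word);		/* e.g. ffz(0x0000ffff) == 16 */
}
#endif
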
#ifdef __KERNEL__

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore it differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

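/*
 * Illustrative sketch, not part of the original header: ffs() is 1-based and
 * ffs(0) == 0, unlike the 0-based ffz() above; hweight32() counts set bits.
 */
#if 0	/* example only */
static void bit_helper_examples(void)
{
        int first = ffs(0x8);				/* == 4, 1-based */
        unsigned int weight = hweight32(0xf0f0f0f0);	/* == 16 bits set */

        (void) first;
        (void) weight;
}
#endif
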
#ifdef __MIPSEB__

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__

static __inline__ int ext2_set_bit(int nr, void * addr)
{
        int mask, retval, flags;
        unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        restore_flags(flags);

        return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int mask, retval, flags;
        unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        restore_flags(flags);

        return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
        int mask;
        const unsigned char *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}

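/*
 * Illustrative sketch, not part of the original header: the ext2_*_bit()
 * routines use little-endian, byte-based bit numbering, so bit @nr lives in
 * byte (nr >> 3) at position (nr & 7) regardless of host endianness.
 */
#if 0	/* example only */
static int ext2_bit_example(void *bitmap)
{
        ext2_set_bit(12, bitmap);		/* sets bit 4 of byte 1 */
        return ext2_test_bit(12, bitmap);	/* non-zero afterwards */
}
#endif
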
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
        find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder, which
 * severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */