/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
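
/*
 * Editor's worked example (not original kernel text): with the 64 bit
 * numbering above, bit 0 is the least significant bit of the first
 * 8-byte word and therefore lives in the *last* byte of that word in
 * storage.  A bitmap whose first long is 0x0000000000000001UL has
 * exactly bit 0 set, and test_bit(0, map) as defined below returns 1.
 */
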
/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#else /* CONFIG_64BIT */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __BITOPS_OR		"laog"
#define __BITOPS_AND		"lang"
#define __BITOPS_XOR		"laxg"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old;					\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		__op_string "	%0,%2,%1\n"			\
		: "=d" (__old), "+Q" (*(__addr))		\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	typecheck(unsigned long *, (__addr));			\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#endif /* CONFIG_64BIT */
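
/*
 * Editor's sketch, not part of the original header: the semantics of
 * __BITOPS_LOOP() expressed in plain, non-atomic C.  Every variant
 * atomically applies "*addr <op>= val" and returns the previous value
 * of *addr; the pre-z196 versions retry with compare-and-swap
 * (cs/csg), the z196 version uses one interlocked-access instruction
 * (laog/lang/laxg).  The OR case stands in for all three operators.
 */
static inline unsigned long __bitops_loop_sketch(unsigned long *addr,
						 unsigned long val)
{
	unsigned long old = *addr;	/* value before the update */

	*addr = old | val;		/* done atomically in the real macro */
	return old;
}
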
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned char *
__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
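
/*
 * Editor's worked example: for nr = 70 on a 64 bit kernel,
 * __bitops_word() masks off the in-word bits (70 & ~63 = 64) and adds
 * 64 / 8 = 8 bytes, i.e. the second long of the bitmap, while
 * __bitops_byte() flips the in-word byte index for big endian order:
 * (70 ^ 56) >> 3 = 15, the last byte of that second long.
 */
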
static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}

static inline int
test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
	barrier();
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
	barrier();
	return (old & ~mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
	barrier();
	return (old & mask) != 0;
}
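
/*
 * Editor's usage sketch with a hypothetical "busy" bitmap: because
 * test_and_set_bit() is atomic, it can hand out slots from a shared
 * bitmap without further locking.
 */
static inline long __claim_slot_sketch(volatile unsigned long *busy,
				       unsigned long size)
{
	unsigned long nr;

	for (nr = 0; nr < size; nr++)
		if (!test_and_set_bit(nr, busy))
			return nr;	/* bit was clear and is now ours */
	return -1;			/* all slots taken */
}
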
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr |= 1 << (nr & 7);
}

static inline void 
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr &= ~(1 << (nr & 7));
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);

	*addr ^= 1 << (nr & 7);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr |= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr &= ~(1 << (nr & 7));
	return (ch >> (nr & 7)) & 1;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned char *addr = __bitops_byte(nr, ptr);
	unsigned char ch;

	ch = *addr;
	*addr ^= 1 << (nr & 7);
	return (ch >> (nr & 7)) & 1;
}

static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	const volatile unsigned char *addr;

	addr = ((const volatile unsigned char *)ptr);
	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
	return (*addr >> (nr & 7)) & 1;
}
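
/*
 * Editor's usage sketch: the double-underscore variants above are
 * non-atomic and only safe when nobody else can touch the bitmap,
 * e.g. while initializing a freshly allocated map.
 */
static inline void __init_map_sketch(unsigned long *map)
{
	__set_bit(0, map);		/* mark entry 0 used, no lock needed */
	if (__test_and_clear_bit(0, map))
		__change_bit(0, map);	/* entry 0 is set again */
}
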
/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}
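
/*
 * Editor's sketch in plain C of what both word loops above compute:
 * the byte offset of the first long that can contain a match.  Like
 * the assembler loops, it never examines the last word; the callers
 * below resolve that word with __ffz_word()/__ffs_word().  "stop" is
 * -1UL for the ffz case and 0UL for the ffs case.
 */
static inline unsigned long __word_loop_sketch(const unsigned long *addr,
					       unsigned long size,
					       unsigned long stop)
{
	unsigned long words = __BITOPS_WORDS(size);
	unsigned long i;

	for (i = 0; i + 1 < words; i++)
		if (addr[i] != stop)
			break;
	return i * sizeof(long);	/* byte offset, like "bytes" above */
}
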
/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
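
/*
 * Editor's worked example: __ffs_word(0, 0x300000UL) skips the zero
 * low halfword (word becomes 0x30, nr = 16), keeps the non-zero low
 * byte, and finishes with the table: _sb_findmap[0x30] = 4, giving
 * bit 20 -- the lowest set bit of 0x300000.
 */
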
/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p));
#endif
	return word;
}
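
/*
 * Editor's sketch: a portable C equivalent of the byte-reversing load
 * above (the icm sequence on 31 bit, lrvg on 64 bit), shown for the
 * 64 bit case.
 */
static inline unsigned long __load_ulong_le_sketch(const unsigned char *p)
{
	unsigned long word = 0;
	int i;

	for (i = 0; i < 8; i++)			/* assemble LSB first */
		word |= (unsigned long) p[i] << (i * 8);
	return word;
}
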
/*
 * The various find bit functions.
 */

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
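
/*
 * Editor's examples of the three conventions above: ffs(0x10) == 5
 * (1-based, libc style), __ffs(0x10) == 4 (0-based), and
 * ffz(~0x10UL) == 4 (0-based position of the only zero bit).
 */
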
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit
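
/*
 * Editor's worked example: for unsigned long map[2] = { 0, 8 } on a
 * 64 bit kernel, find_first_bit(map, 128) returns 67 (bit 3 of the
 * second long) and find_first_zero_bit(map, 128) returns 0.
 */
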
/*
 * Big endian variant which starts bit counting from left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}
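
/*
 * Editor's note: 0xb9830000 is the FLOGR (find leftmost one) opcode,
 * emitted via .insn for older assemblers.  It stores the number of
 * leading zero bits of val in the even register of the pair, so a
 * zero operand yields 64 and __flo_word(0, 0) == BITS_PER_LONG,
 * mirroring the "not found" convention of the other helpers.
 */
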
/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * end'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but uses bit as the value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
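
/*
 * Editor's usage sketch: counting set bits in a hardware-style (left
 * ordered) bitmap with the iterator above.
 */
static inline unsigned long __count_left_bits_sketch(const unsigned long *map,
						     unsigned long size)
{
	unsigned long bit, count = 0;

	for_each_set_bit_left(bit, map, size)
		count++;		/* bits arrive in left-to-right order */
	return count;
}
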
/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffz_word returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffs_word returns BITS_PER_LONG
		 * if no one bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
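
/*
 * Editor's sketch: the canonical scan loop built from the find
 * functions above; this is essentially what the generic
 * for_each_set_bit() iterator expands to.
 */
static inline unsigned long __count_bits_sketch(const unsigned long *map,
						unsigned long size)
{
	unsigned long bit, count = 0;

	for (bit = find_first_bit(map, size); bit < size;
	     bit = find_next_bit(map, size, bit + 1))
		count++;
	return count;
}
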
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffz returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffz returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le
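
/*
 * Editor's worked example: the _le variants index bits in Intel order,
 * so for a buffer whose first byte is 0x02, find_first_bit_le()
 * returns 1 on s390 just as it would on a little endian machine.
 */
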
#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */