/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is that the architecture
 * independent code uses bit operations of the form
 * "flags |= (1 << bitnr)" intermixed with operations of the
 * form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is that the architecture
 * independent code uses bit operations of the form
 * "flags |= (1 << bitnr)" intermixed with operations of the
 * form "set_bit(bitnr, flags)".
 */
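/*
 * Worked example for the 64 bit format (illustration only, not part of
 * the original header): setting a low numbered bit touches the *last*
 * byte of the long in memory, because bit 0 is the LSB of the big
 * endian value:
 *
 *	unsigned long word = 0;
 *	set_bit(0, &word);	bit 0  -> ((unsigned char *) &word)[7] == 0x01
 *	set_bit(8, &word);	bit 8  -> ((unsigned char *) &word)[6] == 0x01
 *	set_bit(63, &word);	bit 63 -> ((unsigned char *) &word)[0] |= 0x80
 */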
/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* CONFIG_64BIT */
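/*
 * For reference, a rough portable C sketch of what __BITOPS_LOOP does
 * (illustration only, using the GCC __sync_val_compare_and_swap builtin
 * as a stand-in for the CS/CSG instruction):
 *
 *	static inline void __bitops_or_sketch(unsigned long *addr,
 *					      unsigned long val)
 *	{
 *		unsigned long old, new, prev;
 *
 *		prev = *addr;
 *		do {
 *			old = prev;
 *			new = old | val;	use & or ^ for AND/XOR
 *			prev = __sync_val_compare_and_swap(addr, old, new);
 *		} while (prev != old);	CS failed, retry with current value
 *	}
 */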
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
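/*
 * Example for the address and mask computation above (64 bit,
 * illustration only): for nr = 70, "nr & 63" is 6, so
 * "nr ^ (nr & 63)" is 64 and the ">> 3" yields a byte offset of 8.
 * The CS target is therefore the second long of the array and the
 * OR mask is 1UL << 6.
 */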
/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
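/*
 * Example for the byte address computation used by the *_simple
 * routines (64 bit, illustration only): "nr ^ 56" mirrors the byte
 * index inside the big endian long:
 *	nr =  0 -> byte offset ( 0 ^ 56) >> 3 =  7  (last byte of long 0)
 *	nr =  8 -> byte offset ( 8 ^ 56) >> 3 =  6
 *	nr = 70 -> byte offset (70 ^ 56) >> 3 = 15  (last byte of long 1)
 * The bit within that byte is then simply "nr & 7".
 */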
/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit			set_bit_cs
#define clear_bit		clear_bit_cs
#define change_bit		change_bit_cs
#define test_and_set_bit	test_and_set_bit_cs
#define test_and_clear_bit	test_and_clear_bit_cs
#define test_and_change_bit	test_and_change_bit_cs
#else
#define set_bit			set_bit_simple
#define clear_bit		clear_bit_simple
#define change_bit		change_bit_simple
#define test_and_set_bit	test_and_set_bit_simple
#define test_and_clear_bit	test_and_clear_bit_simple
#define test_and_change_bit	test_and_change_bit_simple
#endif
/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] & (1 << (nr & 7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
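/*
 * Rough C equivalent of the loop above (illustration only): scan whole
 * longs and return the byte offset of the first long that is not all
 * ones; the final (possibly partial) long is left to the caller.
 *
 *	static inline unsigned long __ffz_word_loop_sketch(
 *			const unsigned long *addr, unsigned long size)
 *	{
 *		unsigned long bytes = 0;
 *		unsigned long words = (size - 1) / (8 * sizeof(long));
 *
 *		while (words-- && *addr == ~0UL) {
 *			addr++;
 *			bytes += sizeof(long);
 *		}
 *		return bytes;
 *	}
 */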
/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}
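/*
 * Example (illustration only): for word = 0x00000000ffffffff on 64 bit,
 * the low 32 bits are all ones, so word is shifted right by 32 and nr
 * becomes 32.  The 16 and 8 bit tests then fail (the low byte is now
 * 0x00) and _zb_findmap[0x00] is 0, giving 32 as the number of the
 * first zero bit.
 */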
/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
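/*
 * Rough portable equivalent (illustration only, assuming the GCC
 * __builtin_bswap builtins): load the big endian long and reverse its
 * bytes, just like the IC/ICM sequence and LRVG do.
 *
 *	static inline unsigned long __load_ulong_le_sketch(
 *			const unsigned long *p, unsigned long offset)
 *	{
 *		unsigned long word;
 *
 *		word = *(unsigned long *)((unsigned long) p + offset);
 *	#ifdef CONFIG_64BIT
 *		return __builtin_bswap64(word);
 *	#else
 *		return __builtin_bswap32(word);
 *	#endif
 *	}
 */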
/*
 * The various find bit functions.
 */

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz
 * (man ffs): it returns 1-based bit numbers and 0 if no bit is set.
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of
 * the byte containing it.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit
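/*
 * Usage sketch (illustration only): find and claim a free slot in a
 * fixed size bitmap.
 *
 *	unsigned long map[2] = { 0 };		128 bits, all clear
 *	unsigned long nr;
 *
 *	nr = find_first_zero_bit(map, 128);
 *	if (nr < 128)
 *		set_bit(nr, map);		claim slot nr
 */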
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of
 * the byte containing it.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline int find_next_zero_bit(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit
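/*
 * Example for the partial first word handling above (64 bit,
 * illustration only): with offset = 70 and *p == ~0UL, bit is 6 and
 * "*p >> bit" is all ones in its low 58 bits, so __ffz_word
 * returns 6 + 58 = 64, i.e. __BITOPS_WORDSIZE; the search then
 * continues with find_first_zero_bit() on the remaining words.
 */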
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline int find_next_bit(const unsigned long *addr,
				unsigned long size,
				unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
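/*
 * Example (illustration only): with this little endian numbering,
 * bit 0 is the LSB of the byte at offset 0, so
 *	test_bit_le(0, addr)	tests mask 0x01 of byte 0,
 *	test_bit_le(8, addr)	tests mask 0x01 of byte 1,
 * whereas the native s390 numbering above maps bit 0 to the last
 * byte of the first long.  (test_bit_le comes from the generic
 * <asm-generic/bitops/le.h> included below.)
 */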
static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le
static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */