#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
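
/*
 * Editor's worked example (not part of the original header), assuming
 * the 64 bit format above: bit 35 lives in the first long, so
 * set_bit(35, addr) ORs the mask 1UL << 35 into *addr:
 *
 *    mask = 1UL << 35 = 0x0000000800000000
 *    big endian memory bytes: 00 00 00 08 00 00 00 00
 *
 * The generic expression "flags |= 1UL << 35" on the same long yields
 * exactly the same memory image, which is the point of this numbering.
 */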
/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(						\
		" l %0,%2\n"					\
		"0: lr %1,%0\n"					\
		__op_string " %1,%3\n"				\
		" cs %0,%1,%2\n"				\
		" jl 0b"					\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(						\
		" lg %0,%2\n"					\
		"0: lgr %1,%0\n"				\
		__op_string " %1,%3\n"				\
		" csg %0,%1,%2\n"				\
		" jl 0b"					\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
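
#if 0
/*
 * Editor's sketch, not part of the original header: what __BITOPS_LOOP
 * does, written as portable C with the GCC __sync builtin standing in
 * for the CS/CSG instruction.  CS is a compare-and-swap: it stores
 * "new" only if the word still equals "old"; on failure it reloads
 * "old" and the "jl 0b" branch retries.
 */
static inline unsigned long __bitops_loop_sketch(unsigned long *addr,
						 unsigned long val)
{
	unsigned long old, new, prev;

	old = *addr;			/* l/lg: load current value */
	for (;;) {
		new = old | val;	/* __BITOPS_OR; AND/XOR analogous */
		prev = __sync_val_compare_and_swap(addr, old, new);
		if (prev == old)
			break;		/* cs/csg succeeded */
		old = prev;		/* cs reloaded %0; retry (jl 0b) */
	}
	return old;
}
#endif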
#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	/* mask is inverted here, so compare old and new instead of
	 * testing old & mask: the bit was set iff the AND changed it. */
	return (old ^ new) != 0;
}
/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" oc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
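
/*
 * Editor's note (not part of the original header): the expression
 * (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3 used above turns a bit number
 * into the big endian byte offset of the byte holding that bit.
 * Worked example with __BITOPS_WORDSIZE == 64 (XOR constant 56):
 *
 *    nr = 0:  (0 ^ 56) >> 3  = 7	bit 0 is in byte 7, the LSB
 *    nr = 35: (35 ^ 56) >> 3 = 3	matches the mask example above
 *    nr = 64: (64 ^ 56) >> 3 = 15	first bit of the second long
 *
 * The low three bits of nr select the bit within that byte (nr & 7);
 * the XOR flips the byte index within each long for big endian layout.
 */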
/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" nc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		" xc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )
/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" oc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" nc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		" xc %O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif
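
/*
 * Editor's usage sketch (not part of the original header): generic
 * code uses the names mapped above without caring which implementation
 * it gets, e.g.:
 *
 *    unsigned long flags[__BITOPS_WORDS(64)];
 *
 *    set_bit(5, flags);
 *    if (test_and_clear_bit(5, flags))
 *            handle_was_set();	(hypothetical callback)
 *
 * With CONFIG_SMP this compiles to the CS/CSG loops; otherwise to the
 * single-byte OC/NC/XC forms.
 */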
/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		" ahi %1,-1\n"
		" sra %1,5\n"
		" jz 1f\n"
		"0: c %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#else
		" aghi %1,-1\n"
		" srag %1,%1,6\n"
		" jz 1f\n"
		"0: cg %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,8(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		" ahi %1,-1\n"
		" sra %1,5\n"
		" jz 1f\n"
		"0: c %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#else
		" aghi %1,-1\n"
		" srag %1,%1,6\n"
		" jz 1f\n"
		"0: cg %2,0(%0,%3)\n"
		" jne 1f\n"
		" la %0,8(%0)\n"
		" brct %1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
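
#if 0
/*
 * Editor's sketch, not part of the original header: the asm in
 * __ffz_word_loop/__ffs_word_loop is equivalent to this C loop, with
 * "pattern" being -1UL when searching for a zero bit and 0UL when
 * searching for a set bit.  It skips complete leading longs that
 * cannot contain a match and returns their byte offset; the caller
 * then searches the final long itself.  As in the asm, size must be
 * non-zero (the find_first_* callers guarantee that).
 */
static inline unsigned long __word_loop_sketch(const unsigned long *addr,
					       unsigned long size,
					       unsigned long pattern)
{
	unsigned long bytes = 0;
	unsigned long words = (size - 1) / __BITOPS_WORDSIZE;

	while (words-- && *addr == pattern) {
		addr++;
		bytes += sizeof(unsigned long);
	}
	return bytes;
}
#endif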
/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}
/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
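
/*
 * Editor's worked example (not part of the original header):
 * __ffs_word(0, 0x300000) narrows down on the lowest set bit:
 *
 *    low 32 bits 0x00300000 != 0	keep word, nr = 0
 *    low 16 bits 0x0000 == 0		word >>= 16, nr = 16
 *    low 8 bits 0x30 != 0		keep word, nr = 16
 *    _sb_findmap[0x30] = 4		result 16 + 4 = 20
 *
 * and indeed 0x300000 = (1 << 20) | (1 << 21), so bit 20 is the first
 * set bit.  __ffz_word works the same way on the inverted tests with
 * the _zb_findmap table.
 */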
/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		" ic %0,%O1(%R1)\n"
		" icm %0,2,%O1+1(%R1)\n"
		" icm %0,4,%O1+2(%R1)\n"
		" icm %0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		" lrvg %0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
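
/*
 * Editor's illustration (not part of the original header): for memory
 * bytes 01 02 03 04 05 06 07 08 at *p, the 64 bit __load_ulong_be
 * returns 0x0102030405060708, while __load_ulong_le (LRVG, a
 * byte-reversed load) returns 0x0807060504030201.  This is what lets
 * the big endian search helpers below run unchanged over little
 * endian (ext2-style) bitmaps.
 */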
/*
 * The various find bit functions.
 */

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
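
/*
 * Editor's examples (not part of the original header):
 *
 *    ffz(0x0f) == 4		first zero bit; undefined for ~0UL
 *    __ffs(0x10) == 4		first set bit; undefined for 0
 *    ffs(0x10) == 5		1-based; ffs(0) == 0 (libc convention)
 */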
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit
/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit
/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
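
#if 0
/*
 * Editor's usage sketch, not part of the original header: iterating
 * over all set bits of a bitmap with the helpers above.  This is
 * essentially what the generic for_each_set_bit() macro expands to.
 */
static inline void walk_set_bits_sketch(const unsigned long *bitmap,
					unsigned long size)
{
	unsigned long bit;

	for (bit = find_first_bit(bitmap, size);
	     bit < size;
	     bit = find_next_bit(bitmap, size, bit + 1))
		handle_set_bit(bit);	/* hypothetical per-bit action */
}
#endif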
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
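
/*
 * Editor's note (not part of the original header): in this convention
 * bit n lives in byte n / 8 at position n % 8, e.g. bit 0 is the LSB
 * of byte 0 and bit 9 is bit 1 of byte 1, independent of the host
 * byte order.  The *_le helpers below get there by byte-swapping each
 * long with __load_ulong_le before searching it.
 */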
static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le
static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le
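
/*
 * Editor's note (not part of the original header): the generic headers
 * below build the remaining little endian ops (test_bit_le() and
 * friends) and the ext2_set_bit_atomic()/ext2_clear_bit_atomic()
 * wrappers on top of the *_le helpers defined above.
 */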
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */