
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifdef __KERNEL__

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
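
/*
 * Illustration (an informal sketch, not part of the API): in the 32 bit
 * format, set_bit(0, p) sets the 0x01 bit of the byte at ((char *) p) + 3,
 * because bit 0 is the LSB of the big endian long at p.  A plain
 * "*p |= 1UL << 0" stores to that same byte, which is what makes the two
 * styles interchangeable.
 */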

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
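
/*
 * How the loop works (an informal sketch): __BITOPS_LOOP loads the old
 * word, computes the new word with __op_string, and retries the update
 * with COMPARE AND SWAP until no other CPU has changed the word in the
 * meantime; CS sets condition code 1 on mismatch, hence the "jl 0b".
 */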

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
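
/*
 * Note on the address calculation above: (nr ^ (nr & (__BITOPS_WORDSIZE - 1)))
 * is nr with its low bits cleared, i.e. the bit offset of the aligned word
 * that contains bit nr; shifting right by 3 turns that into a byte offset.
 * All of the _cs routines below share this addr/mask scheme.
 */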

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}
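
/*
 * Note: with mask = ~(1UL << bit), (old ^ new) != 0 is equivalent to
 * (old & (1UL << bit)) != 0, i.e. the return value is the old state of
 * the bit, just as in the other test_and_* routines.
 */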

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}
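
/*
 * Note: XORing nr with (__BITOPS_WORDSIZE - 8) inverts the byte index
 * within the word, translating the little endian style bit number into
 * the address of the big endian byte that actually holds the bit.  The
 * switch in __constant_set_bit lets the compiler emit a single OI
 * instruction with an immediate mask when nr is a compile-time constant,
 * instead of going through the _oi_bitmap table.
 */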

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
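
/*
 * Note: the test_and_*_simple routines read the old byte before the
 * update without any interlock, so they are only safe where no other
 * CPU or interrupt handler can modify the bitmap concurrently.
 */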

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] & (1 << (nr & 7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}
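
/*
 * Example (an informal sketch): ffz(0xffffUL) sees that the low 16 bits
 * are all ones, shifts them away (bit = 16), and then finds bit 0 clear
 * in the remaining zero byte via _zb_findmap, returning 16.  The
 * _zb_findmap table holds, for each byte value, the index of its first
 * zero bit.
 */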

/*
 * __ffs = find first set bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}
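
/*
 * Example (an informal sketch): __ffs(0x8000UL) keeps the nonzero low
 * halfword, skips the all-zero low byte (bit = 8), and _sb_findmap[0x80]
 * yields 7, for a result of 15.  The _sb_findmap table holds, for each
 * byte value, the index of its first set bit.
 */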

/*
 * Find-bit routines.
 */
#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__(" lhi %1,-1\n"
		" lr %2,%3\n"
		" slr %0,%0\n"
		" ahi %2,31\n"
		" srl %2,5\n"
		"0: c %1,0(%0,%4)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %2,0b\n"
		" lr %0,%3\n"
		" j 4f\n"
		"1: l %2,0(%0,%4)\n"
		" sll %0,3\n"
		" lhi %1,0xff\n"
		" tml %2,0xffff\n"
		" jno 2f\n"
		" ahi %0,16\n"
		" srl %2,16\n"
		"2: tml %2,0x00ff\n"
		" jno 3f\n"
		" ahi %0,8\n"
		" srl %2,8\n"
		"3: nr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" alr %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__(" slr %1,%1\n"
		" lr %2,%3\n"
		" slr %0,%0\n"
		" ahi %2,31\n"
		" srl %2,5\n"
		"0: c %1,0(%0,%4)\n"
		" jne 1f\n"
		" la %0,4(%0)\n"
		" brct %2,0b\n"
		" lr %0,%3\n"
		" j 4f\n"
		"1: l %2,0(%0,%4)\n"
		" sll %0,3\n"
		" lhi %1,0xff\n"
		" tml %2,0xffff\n"
		" jnz 2f\n"
		" ahi %0,16\n"
		" srl %2,16\n"
		"2: tml %2,0x00ff\n"
		" jnz 3f\n"
		" ahi %0,8\n"
		" srl %2,8\n"
		"3: nr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" alr %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__(" lghi %1,-1\n"
		" lgr %2,%3\n"
		" slgr %0,%0\n"
		" aghi %2,63\n"
		" srlg %2,%2,6\n"
		"0: cg %1,0(%0,%4)\n"
		" jne 1f\n"
		" la %0,8(%0)\n"
		" brct %2,0b\n"
		" lgr %0,%3\n"
		" j 5f\n"
		"1: lg %2,0(%0,%4)\n"
		" sllg %0,%0,3\n"
		" clr %2,%1\n"
		" jne 2f\n"
		" aghi %0,32\n"
		" srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		" tmll %2,0xffff\n"
		" jno 3f\n"
		" aghi %0,16\n"
		" srl %2,16\n"
		"3: tmll %2,0x00ff\n"
		" jno 4f\n"
		" aghi %0,8\n"
		" srl %2,8\n"
		"4: ngr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__(" slgr %1,%1\n"
		" lgr %2,%3\n"
		" slgr %0,%0\n"
		" aghi %2,63\n"
		" srlg %2,%2,6\n"
		"0: cg %1,0(%0,%4)\n"
		" jne 1f\n"
		" aghi %0,8\n"
		" brct %2,0b\n"
		" lgr %0,%3\n"
		" j 5f\n"
		"1: lg %2,0(%0,%4)\n"
		" sllg %0,%0,3\n"
		" clr %2,%1\n"
		" jne 2f\n"
		" aghi %0,32\n"
		" srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		" tmll %2,0xffff\n"
		" jnz 3f\n"
		" aghi %0,16\n"
		" srl %2,16\n"
		"3: tmll %2,0x00ff\n"
		" jnz 4f\n"
		" aghi %0,8\n"
		" srl %2,8\n"
		"4: ngr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit (const unsigned long * addr, unsigned long size,
		    unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(*p >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit (const unsigned long * addr, unsigned long size,
	       unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs(*p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
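
/*
 * Typical usage (a sketch, not taken from this file): walking all set
 * bits of a bitmap with "size" bits:
 *
 *	for (i = find_first_bit(map, size); i < size;
 *	     i = find_next_bit(map, size, i + 1))
 *		... process bit i ...
 */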

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
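
/*
 * Note: XORing the ext2 bit number with (__BITOPS_WORDSIZE - 8), as the
 * macros below do, flips the byte index within the word, so ext2 bit 0
 * (LSB of the first byte) becomes native bit 24 on 31 bit (bit 56 on
 * 64 bit), which addresses exactly that first byte in big endian order.
 */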

#define ext2_set_bit(nr, addr)       \
	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)

#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__(" lhi %1,-1\n"
		" lr %2,%3\n"
		" ahi %2,31\n"
		" srl %2,5\n"
		" slr %0,%0\n"
		"0: cl %1,0(%0,%4)\n"
		" jne 1f\n"
		" ahi %0,4\n"
		" brct %2,0b\n"
		" lr %0,%3\n"
		" j 4f\n"
		"1: l %2,0(%0,%4)\n"
		" sll %0,3\n"
		" ahi %0,24\n"
		" lhi %1,0xff\n"
		" tmh %2,0xffff\n"
		" jo 2f\n"
		" ahi %0,-16\n"
		" srl %2,16\n"
		"2: tml %2,0xff00\n"
		" jo 3f\n"
		" ahi %0,-8\n"
		" srl %2,8\n"
		"3: nr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" alr %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__(" lghi %1,-1\n"
		" lgr %2,%3\n"
		" aghi %2,63\n"
		" srlg %2,%2,6\n"
		" slgr %0,%0\n"
		"0: clg %1,0(%0,%4)\n"
		" jne 1f\n"
		" aghi %0,8\n"
		" brct %2,0b\n"
		" lgr %0,%3\n"
		" j 5f\n"
		"1: cl %1,0(%0,%4)\n"
		" jne 2f\n"
		" aghi %0,4\n"
		"2: l %2,0(%0,%4)\n"
		" sllg %0,%0,3\n"
		" aghi %0,24\n"
		" lghi %1,0xff\n"
		" tmlh %2,0xffff\n"
		" jo 3f\n"
		" aghi %0,-16\n"
		" srl %2,16\n"
		"3: tmll %2,0xff00\n"
		" jo 4f\n"
		" aghi %0,-8\n"
		" srl %2,8\n"
		"4: ngr %2,%1\n"
		" ic %2,0(%2,%5)\n"
		" algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long word, bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
#ifndef __s390x__
		asm(" ic %0,0(%1)\n"
		    " icm %0,2,1(%1)\n"
		    " icm %0,4,2(%1)\n"
		    " icm %0,8,3(%1)"
		    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
		asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
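		/*
		 * The loads above (insert-character on 31 bit, lrvg on
		 * 64 bit) fetch the word byte-reversed, so the little
		 * endian ext2 bit layout becomes a native value that
		 * ffz can search directly.
		 */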
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(word >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_zero_bit(p, size);
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */