
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
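
/*
 * Illustration (added sketch, not part of the original header): under
 * this numbering the generic idiom and the bitops interface address
 * the same memory bit. For a hypothetical flags word:
 *
 *	unsigned long flags = 0;
 *	flags |= 1UL << 5;	(generic code path)
 *	set_bit(5, &flags);	(bitops path, touches the same bit)
 *
 * so test_bit(5, &flags) is true after either statement.
 */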
/*
 * Set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to a 4 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
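
/*
 * Sketch (illustrative only): __BITOPS_LOOP expands to a standard
 * compare-and-swap retry loop. Assuming a hypothetical atomic
 * compare_and_swap() primitive, it behaves like the following C:
 *
 *	old = *addr;
 *	do {
 *		new = old OP val;	(OP is OR, AND or XOR)
 *	} while (compare_and_swap(addr, old, new) fails,
 *		 reloading old on each failure);
 *
 * The real implementation must stay in assembly so that CS/CSG
 * performs the update atomically.
 */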
#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
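
/*
 * Usage sketch (illustrative, not from the original header): because
 * the old bit value is returned, test_and_set_bit can serve as a
 * simple claim flag on a hypothetical word of device flags:
 *
 *	if (!test_and_set_bit_cs(0, &dev_flags))
 *		(the bit was clear; this CPU claimed the resource)
 *	else
 *		(the bit was already set; somebody else owns it)
 */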
/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7)));
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
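
/*
 * Example (illustrative): test_bit dispatches on whether the bit
 * number folds to a compile-time constant:
 *
 *	test_bit(3, &flags)	(constant nr: __constant_test_bit)
 *	test_bit(n, &flags)	(variable nr: __test_bit)
 */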
#ifndef __s390x__

/*
 * Find-bit routines.
 */
static inline int
find_first_zero_bit(const unsigned long * addr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jno  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jno  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   slr  %1,%1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jnz  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jnz  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_next_zero_bit (const unsigned long * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long bitvec, reg;
	int set, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in first word
		 */
		bitvec = (*p) >> bit;
		__asm__("   slr  %0,%0\n"
			"   lhi  %2,0xff\n"
			"   tml  %1,0xffff\n"
			"   jno  0f\n"
			"   ahi  %0,16\n"
			"   srl  %1,16\n"
			"0: tml  %1,0x00ff\n"
			"   jno  1f\n"
			"   ahi  %0,8\n"
			"   srl  %1,8\n"
			"1: nr   %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   alr  %0,%1"
			: "=&d" (set), "+a" (bitvec), "=&d" (reg)
			: "a" (&_zb_findmap) : "cc" );
		if (set < (32 - bit))
			return set + offset;
		offset += 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + res);
}

static inline int
find_next_bit (const unsigned long * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long bitvec, reg;
	int set, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for set bit in first word
		 */
		bitvec = (*p) >> bit;
		__asm__("   slr  %0,%0\n"
			"   lhi  %2,0xff\n"
			"   tml  %1,0xffff\n"
			"   jnz  0f\n"
			"   ahi  %0,16\n"
			"   srl  %1,16\n"
			"0: tml  %1,0x00ff\n"
			"   jnz  1f\n"
			"   ahi  %0,8\n"
			"   srl  %1,8\n"
			"1: nr   %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   alr  %0,%1"
			: "=&d" (set), "+a" (bitvec), "=&d" (reg)
			: "a" (&_sb_findmap) : "cc" );
		if (set < (32 - bit))
			return set + offset;
		offset += 32 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + res);
}

#else /* __s390x__ */

/*
 * Find-bit routines.
 */
static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jno  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jno  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   slgr %1,%1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jnz  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jnz  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_next_zero_bit (const unsigned long * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long bitvec, reg;
	unsigned long set, bit = offset & 63, res;

	if (bit) {
		/*
		 * Look for zero in first word
		 */
		bitvec = (*p) >> bit;
		__asm__("   lhi  %2,-1\n"
			"   slgr %0,%0\n"
			"   clr  %1,%2\n"
			"   jne  0f\n"
			"   aghi %0,32\n"
			"   srlg %1,%1,32\n"
			"0: lghi %2,0xff\n"
			"   tmll %1,0xffff\n"
			"   jno  1f\n"
			"   aghi %0,16\n"
			"   srlg %1,%1,16\n"
			"1: tmll %1,0x00ff\n"
			"   jno  2f\n"
			"   aghi %0,8\n"
			"   srlg %1,%1,8\n"
			"2: ngr  %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   algr %0,%1"
			: "=&d" (set), "+a" (bitvec), "=&d" (reg)
			: "a" (&_zb_findmap) : "cc" );
		if (set < (64 - bit))
			return set + offset;
		offset += 64 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 64 * (p - (unsigned long *) addr));
	return (offset + res);
}

static inline unsigned long
find_next_bit (const unsigned long * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long bitvec, reg;
	unsigned long set, bit = offset & 63, res;

	if (bit) {
		/*
		 * Look for set bit in first word
		 */
		bitvec = (*p) >> bit;
		__asm__("   slgr %0,%0\n"
			"   ltr  %1,%1\n"
			"   jnz  0f\n"
			"   aghi %0,32\n"
			"   srlg %1,%1,32\n"
			"0: lghi %2,0xff\n"
			"   tmll %1,0xffff\n"
			"   jnz  1f\n"
			"   aghi %0,16\n"
			"   srlg %1,%1,16\n"
			"1: tmll %1,0x00ff\n"
			"   jnz  2f\n"
			"   aghi %0,8\n"
			"   srlg %1,%1,8\n"
			"2: ngr  %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   algr %0,%1"
			: "=&d" (set), "+a" (bitvec), "=&d" (reg)
			: "a" (&_sb_findmap) : "cc" );
		if (set < (64 - bit))
			return set + offset;
		offset += 64 - bit;
		p++;
	}
	/*
	 * No set bit yet, search remaining full words for a bit
	 */
	res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr));
	return (offset + res);
}

#endif /* __s390x__ */
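
/*
 * Usage sketch (illustrative): the find routines are typically used to
 * walk the set bits of a bitmap; both return "size" when no further
 * bit is found, which terminates the loop:
 *
 *	for (bit = find_first_bit(bitmap, nbits);
 *	     bit < nbits;
 *	     bit = find_next_bit(bitmap, nbits, bit + 1))
 *		(process bit)
 */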
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}

/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}
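
/*
 * Example (illustrative): since ffz is undefined for an all-ones word
 * and __ffs for a zero word, callers guard first:
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);	(index of first zero bit)
 *	if (word != 0UL)
 *		bit = __ffs(word);	(index of first set bit)
 */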
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight64(x)						\
({								\
	unsigned long __x = (x);				\
	unsigned int __w;					\
	__w = generic_hweight32((unsigned int) __x);		\
	__w += generic_hweight32((unsigned int) (__x>>32));	\
	__w;							\
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#ifdef __KERNEL__

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
#define ext2_set_bit(nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
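
/*
 * Worked example (added note): the bit routines above locate the byte
 * for bit nr at offset (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3. Passing
 * nr ^ (__BITOPS_WORDSIZE - 8) here cancels that byte swap, so ext2
 * bit nr lands in byte nr >> 3 at bit position nr & 7, which is the
 * little endian on-disk layout that ext2 and minix expect.
 */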
#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"   slr  %0,%0\n"
		"0: cl   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   ahi  %0,24\n"
		"   lhi  %1,0xff\n"
		"   tmh  %2,0xffff\n"
		"   jo   2f\n"
		"   ahi  %0,-16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0xff00\n"
		"   jo   3f\n"
		"   ahi  %0,-8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned int size, unsigned offset)
{
	unsigned long *addr = vaddr;
	unsigned long *p = addr + (offset >> 5);
	unsigned long word, reg;
	unsigned int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		__asm__("   ic   %0,0(%1)\n"
			"   icm  %0,2,1(%1)\n"
			"   icm  %0,4,2(%1)\n"
			"   icm  %0,8,3(%1)"
			: "=&a" (word) : "a" (p) : "cc" );
		word >>= bit;
		res = bit;
		/* Look for zero in first longword */
		__asm__("   lhi  %2,0xff\n"
			"   tml  %1,0xffff\n"
			"   jno  0f\n"
			"   ahi  %0,16\n"
			"   srl  %1,16\n"
			"0: tml  %1,0x00ff\n"
			"   jno  1f\n"
			"   ahi  %0,8\n"
			"   srl  %1,8\n"
			"1: nr   %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   alr  %0,%1"
			: "+&d" (res), "+&a" (word), "=&d" (reg)
			: "a" (&_zb_findmap) : "cc" );
		if (res < 32)
			return (p - addr)*32 + res;
		p++;
	}
	/* No zero yet, search remaining full bytes for a zero */
	res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
	return (p - addr) * 32 + res;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"   slgr %0,%0\n"
		"0: clg  %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: cl   %1,0(%0,%4)\n"
		"   jne  2f\n"
		"   aghi %0,4\n"
		"2: l    %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   aghi %0,24\n"
		"   lghi %1,0xff\n"
		"   tmlh %2,0xffff\n"
		"   jo   3f\n"
		"   aghi %0,-16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0xff00\n"
		"   jo   4f\n"
		"   aghi %0,-8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr;
	unsigned long *p = addr + (offset >> 6);
	unsigned long word, reg;
	unsigned long bit = offset & 63UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		__asm__("   lrvg %0,%1"	/* load reversed, neat instruction */
			: "=a" (word) : "m" (*p) );
		word >>= bit;
		res = bit;
		/* Look for zero in first 8 byte word */
		__asm__("   lghi %2,0xff\n"
			"   tmll %1,0xffff\n"
			"   jno  2f\n"
			"   ahi  %0,16\n"
			"   srlg %1,%1,16\n"
			"0: tmll %1,0xffff\n"
			"   jno  2f\n"
			"   ahi  %0,16\n"
			"   srlg %1,%1,16\n"
			"1: tmll %1,0xffff\n"
			"   jno  2f\n"
			"   ahi  %0,16\n"
			"   srl  %1,16\n"
			"2: tmll %1,0x00ff\n"
			"   jno  3f\n"
			"   ahi  %0,8\n"
			"   srl  %1,8\n"
			"3: ngr  %1,%2\n"
			"   ic   %1,0(%1,%3)\n"
			"   alr  %0,%1"
			: "+&d" (res), "+a" (word), "=&d" (reg)
			: "a" (&_zb_findmap) : "cc" );
		if (res < 64)
			return (p - addr)*64 + res;
		p++;
	}
	/* No zero yet, search remaining full bytes for a zero */
	res = ext2_find_first_zero_bit (p, size - 64 * (p - addr));
	return (p - addr) * 64 + res;
}

#endif /* __s390x__ */

/* Bitmap functions for the minix filesystem.  */
/* FIXME !!! */
#define minix_test_and_set_bit(nr,addr) \
	test_and_set_bit(nr,(unsigned long *)addr)
#define minix_set_bit(nr,addr) \
	set_bit(nr,(unsigned long *)addr)
#define minix_test_and_clear_bit(nr,addr) \
	test_and_clear_bit(nr,(unsigned long *)addr)
#define minix_test_bit(nr,addr) \
	test_bit(nr,(unsigned long *)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */