#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * the architecture independent code uses bit operations
 * of the form "flags |= (1 << bitnr)" intermixed with
 * operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * the architecture independent code uses bit operations
 * of the form "flags |= (1 << bitnr)" intermixed with
 * operations of the form "set_bit(bitnr, flags)".
 */
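
/*
 * Worked example (illustrative sketch, not from the original source):
 * the byte that holds bit 'nr' is found by XOR-ing with the byte swap
 * pattern, as the non-atomic byte based routines below do:
 *
 *      byte = (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3;   // byte offset
 *      mask = 1 << (nr & 7);                         // bit inside byte
 *
 * On 64 bit, nr = 70 (0x46) gives byte = (70 ^ 56) >> 3 = 15 and
 * mask = 0x40: bit 0x46 is bit 6 of the last (least significant) byte
 * of the second long, exactly as the table above shows.
 */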
/* Set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to a 4 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN      3
#define __BITOPS_WORDSIZE   32
#define __BITOPS_OR         "or"
#define __BITOPS_AND        "nr"
#define __BITOPS_XOR        "xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
        __asm__ __volatile__("   l   %0,0(%4)\n" \
                             "0: lr  %1,%0\n" \
                             __op_string "  %1,%3\n" \
                             "   cs  %0,%1,0(%4)\n" \
                             "   jl  0b" \
                             : "=&d" (__old), "=&d" (__new), \
                               "=m" (*(unsigned long *) __addr) \
                             : "d" (__val), "a" (__addr), \
                               "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN      7
#define __BITOPS_WORDSIZE   64
#define __BITOPS_OR         "ogr"
#define __BITOPS_AND        "ngr"
#define __BITOPS_XOR        "xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
        __asm__ __volatile__("   lg  %0,0(%4)\n" \
                             "0: lgr %1,%0\n" \
                             __op_string "  %1,%3\n" \
                             "   csg %0,%1,0(%4)\n" \
                             "   jl  0b" \
                             : "=&d" (__old), "=&d" (__new), \
                               "=m" (*(unsigned long *) __addr) \
                             : "d" (__val), "a" (__addr), \
                               "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
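
/*
 * In C terms, __BITOPS_LOOP is a compare-and-swap retry loop, roughly
 * equivalent to this sketch (illustrative only; the gcc builtin stands
 * in for the CS/CSG instruction):
 *
 *      do {
 *              __old = *(unsigned long *) __addr;
 *              __new = __old | __val;          // or &, ^ per __op_string
 *      } while (!__sync_bool_compare_and_swap((unsigned long *) __addr,
 *                                             __old, __new));
 *
 * On failure CS/CSG reloads %0 with the current memory contents, so the
 * real loop re-enters at label 0 without an extra load and converges
 * even under contention.
 */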
#ifdef CONFIG_SMP

/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
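
/*
 * Example (illustrative sketch): on 64 bit, set_bit_cs(70, ptr) computes
 * addr = (unsigned long) ptr + ((70 ^ (70 & 63)) >> 3) = ptr + 8, i.e.
 * the second long, and mask = 1UL << (70 & 63) = 1UL << 6, then ORs the
 * mask in with the CS loop above. Unlike the non-atomic byte based
 * routines, the CS based routines operate on whole longs.
 */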
/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND/test mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
        __BITOPS_BARRIER();
        return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
#if ALIGN_CS == 1
        nr += (addr & __BITOPS_ALIGN) << 3;  /* add alignment to bit number */
        addr ^= addr & __BITOPS_ALIGN;       /* align address to 8 */
#endif
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}

#endif /* CONFIG_SMP */
/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("oc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr & 7) {
        case 0:
                asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define set_bit_simple(nr,addr) \
        (__builtin_constant_p((nr)) ? \
         __constant_set_bit((nr),(addr)) : \
         __set_bit((nr),(addr)) )
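
/*
 * Design note (illustrative, not from the original source): for a
 * compile-time constant bit number the mask byte is known, so a single
 * OI with an immediate operand can be emitted; for a variable bit
 * number the mask byte has to be fetched from the _oi_bitmap table and
 * applied with OC. set_bit_simple picks the right variant with
 * __builtin_constant_p. The clear/change variants below follow the
 * same pattern with NI/NC and XI/XC.
 */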
/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("nc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr & 7) {
        case 0:
                asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define clear_bit_simple(nr,addr) \
        (__builtin_constant_p((nr)) ? \
         __constant_clear_bit((nr),(addr)) : \
         __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("xc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr & 7) {
        case 0:
                asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define change_bit_simple(nr,addr) \
        (__builtin_constant_p((nr)) ? \
         __constant_change_bit((nr),(addr)) : \
         __change_bit((nr),(addr)) )
/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("oc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("nc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("xc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif
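
/*
 * Usage sketch (illustrative; 'map' is a hypothetical caller-provided
 * bitmap, not part of this header):
 *
 *      static unsigned long map[2];            // at least 64 bits
 *
 *      set_bit(33, map);                       // atomic on SMP
 *      if (test_and_clear_bit(33, map))
 *              ...;                            // bit 33 was set
 *      __set_bit(7, map);                      // non-atomic variant
 */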
/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(volatile unsigned char *) addr;
        return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return (((volatile char *) addr)
                [(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] & (1 << (nr & 7))) != 0;
}

#define test_bit(nr,addr) \
        (__builtin_constant_p((nr)) ? \
         __constant_test_bit((nr),(addr)) : \
         __test_bit((nr),(addr)) )
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        unsigned long bit = 0;

#ifdef __s390x__
        if (likely((word & 0xffffffff) == 0xffffffff)) {
                word >>= 32;
                bit += 32;
        }
#endif
        if (likely((word & 0xffff) == 0xffff)) {
                word >>= 16;
                bit += 16;
        }
        if (likely((word & 0xff) == 0xff)) {
                word >>= 8;
                bit += 8;
        }
        return bit + _zb_findmap[word & 0xff];
}
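
/*
 * Worked example (illustrative): ffz(0xf7ff) first skips the all-ones
 * low byte (word >>= 8, bit = 8), then looks up _zb_findmap[0xf7] = 3,
 * the lowest zero bit of 11110111b, and returns 8 + 3 = 11.
 */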
/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        unsigned long bit = 0;

#ifdef __s390x__
        if (likely((word & 0xffffffff) == 0)) {
                word >>= 32;
                bit += 32;
        }
#endif
        if (likely((word & 0xffff) == 0)) {
                word >>= 16;
                bit += 16;
        }
        if (likely((word & 0xff) == 0)) {
                word >>= 8;
                bit += 8;
        }
        return bit + _sb_findmap[word & 0xff];
}
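
/*
 * Worked example (illustrative): __ffs(0x4000) finds a non-zero low
 * half word, then skips the zero low byte (word >>= 8, bit = 8) and
 * looks up _sb_findmap[0x40] = 6, returning 8 + 6 = 14.
 */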
/*
 * Find-bit routines.
 */
#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   slr  %0,%0\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "0: c    %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,4(%0)\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   lhi  %1,0xff\n"
                "   tml  %2,0xffff\n"
                "   jno  2f\n"
                "   ahi  %0,16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0x00ff\n"
                "   jno  3f\n"
                "   ahi  %0,8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long *addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   slr  %1,%1\n"
                "   lr   %2,%3\n"
                "   slr  %0,%0\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "0: c    %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,4(%0)\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   lhi  %1,0xff\n"
                "   tml  %2,0xffff\n"
                "   jnz  2f\n"
                "   ahi  %0,16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0x00ff\n"
                "   jnz  3f\n"
                "   ahi  %0,8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_sb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi %1,-1\n"
                "   lgr  %2,%3\n"
                "   slgr %0,%0\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "0: cg   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,8(%0)\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: lg   %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   clr  %2,%1\n"
                "   jne  2f\n"
                "   aghi %0,32\n"
                "   srlg %2,%2,32\n"
                "2: lghi %1,0xff\n"
                "   tmll %2,0xffff\n"
                "   jno  3f\n"
                "   aghi %0,16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0x00ff\n"
                "   jno  4f\n"
                "   aghi %0,8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   slgr %1,%1\n"
                "   lgr  %2,%3\n"
                "   slgr %0,%0\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "0: cg   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   aghi %0,8\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: lg   %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   clr  %2,%1\n"
                "   jne  2f\n"
                "   aghi %0,32\n"
                "   srlg %2,%2,32\n"
                "2: lghi %1,0xff\n"
                "   tmll %2,0xffff\n"
                "   jnz  3f\n"
                "   aghi %0,16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0x00ff\n"
                "   jnz  4f\n"
                "   aghi %0,8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_sb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit(const unsigned long *addr, unsigned long size,
                   unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * s390 version of ffz returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = ffz(*p >> bit) + bit;
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit(const unsigned long *addr, unsigned long size,
              unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * s390 version of __ffs returns __BITOPS_WORDSIZE
                 * if no set bit is present in the word.
                 */
                set = __ffs(*p & (~0UL << bit));
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_bit(p, size);
}
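
/*
 * Usage sketch (illustrative; 'map' and 'nbits' are hypothetical): the
 * find routines combine into the usual bitmap iteration pattern:
 *
 *      for (i = find_first_bit(map, nbits); i < nbits;
 *           i = find_next_bit(map, nbits, i + 1))
 *              ...;                    // i is the number of a set bit
 */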
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        return find_first_bit(b, 140);
}

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>

#ifdef __KERNEL__

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

#define ext2_set_bit(nr, addr) \
        __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr) \
        test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
        __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
        test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr) \
        test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
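
/*
 * Example (illustrative sketch): ext2_set_bit(nr, addr) flips the bit
 * number with nr ^ (__BITOPS_WORDSIZE - 8), which cancels the byte swap
 * done inside the byte based routines. On 64 bit, nr = 0 becomes bit
 * 56, whose byte offset is (56 ^ 56) >> 3 = 0 and whose mask is 0x01:
 * ext2 bit 0 ends up as the LSB of the first byte, as the little endian
 * convention above requires.
 */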
#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "   slr  %0,%0\n"
                "0: cl   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   ahi  %0,4\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   ahi  %0,24\n"
                "   lhi  %1,0xff\n"
                "   tmh  %2,0xffff\n"
                "   jo   2f\n"
                "   ahi  %0,-16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0xff00\n"
                "   jo   3f\n"
                "   ahi  %0,-8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) vaddr) : "cc" );
        return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi %1,-1\n"
                "   lgr  %2,%3\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "   slgr %0,%0\n"
                "0: clg  %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   aghi %0,8\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: cl   %1,0(%0,%4)\n"
                "   jne  2f\n"
                "   aghi %0,4\n"
                "2: l    %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   aghi %0,24\n"
                "   lghi %1,0xff\n"
                "   tmlh %2,0xffff\n"
                "   jo   3f\n"
                "   aghi %0,-16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0xff00\n"
                "   jo   4f\n"
                "   aghi %0,-8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) vaddr) : "cc" );
        return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
        unsigned long *addr = vaddr, *p;
        unsigned long word, bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
#ifndef __s390x__
                asm("   ic   %0,0(%1)\n"
                    "   icm  %0,2,1(%1)\n"
                    "   icm  %0,4,2(%1)\n"
                    "   icm  %0,8,3(%1)"
                    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
                asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
                /*
                 * s390 version of ffz returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = ffz(word >> bit) + bit;
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + ext2_find_first_zero_bit(p, size);
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */