#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
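/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): because of the numbering above, the two intermixed idioms
 * named in the comment agree on which bit they touch within one long:
 *
 *	unsigned long flags = 0;
 *
 *	flags |= (1UL << 5);	// generic code marks bit 5 by hand ...
 *	set_bit(5, &flags);	// ... and set_bit hits the very same bit,
 *				// so test_bit(5, &flags) returns 1 either way.
 */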
/* set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to a word boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/s390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
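/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): __BITOPS_LOOP is the classic compare-and-swap retry loop.
 * Written in portable C with the GCC __sync builtin standing in for
 * the CS/CSG instruction, it would read roughly as:
 *
 *	static inline void example_or_loop(unsigned long *word,
 *					   unsigned long mask)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = *word;		// snapshot the word
 *			new = old | mask;	// apply the OR mask
 *			// retry if another CPU changed *word in between
 *		} while (__sync_val_compare_and_swap(word, old, new) != old);
 *	}
 */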
/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to word boundary */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}
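/*
 * Worked example (added for exposition): the byte address computed
 * above, (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3, selects the big-endian
 * byte that holds bit nr. With __BITOPS_WORDSIZE == 32 this is
 * (nr ^ 24) >> 3: for nr = 0 it yields (0 ^ 24) >> 3 = 3, the last
 * byte of the word, where the LSB lives on a big-endian machine;
 * for nr = 31 it yields (31 ^ 24) >> 3 = 0, the first byte, which
 * holds the MSB. The bit within that byte is then nr & 7, and
 * _oi_bitmap[nr & 7] supplies the matching one-byte OR mask
 * (1 << (nr & 7), judging from the __constant_set_bit cases below).
 */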
static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif
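/*
 * Usage sketch (added for exposition; device_flags and FLAG_BUSY are
 * hypothetical): test_and_set_bit atomically claims a flag and reports
 * whether it was already set, giving the usual "only one caller wins"
 * pattern:
 *
 *	#define FLAG_BUSY 0
 *	static unsigned long device_flags;
 *
 *	if (!test_and_set_bit(FLAG_BUSY, &device_flags)) {
 *		// the bit was clear and we set it: exclusive access
 *		do_work();
 *		clear_bit(FLAG_BUSY, &device_flags);
 *	}
 */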
/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}
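/*
 * Usage sketch (added for exposition): since ffz is undefined for a
 * word without a zero bit, callers guard against ~0UL first:
 *
 *	if (word != ~0UL)
 *		first_free = ffz(word);	// index of the lowest 0 bit
 */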
/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}
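/*
 * Worked example (added for exposition, assuming _sb_findmap[b] gives
 * the lowest set bit of byte b): for word = 0x00400000 the binary
 * search above runs as follows: the low 16 bits are zero, so
 * word >>= 16 and bit = 16; the remaining low byte is 0x40, not zero,
 * so no further shift; _sb_findmap[0x40] = 6, hence
 * __ffs(0x00400000) = 16 + 6 = 22.
 */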
/*
 * Find-bit routines.
 */
#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jno  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jno  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   slr  %1,%1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jnz  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jnz  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi  %1,-1\n"
		"   lgr   %2,%3\n"
		"   slgr  %0,%0\n"
		"   aghi  %2,63\n"
		"   srlg  %2,%2,6\n"
		"0: cg    %1,0(%0,%4)\n"
		"   jne   1f\n"
		"   la    %0,8(%0)\n"
		"   brct  %2,0b\n"
		"   lgr   %0,%3\n"
		"   j     5f\n"
		"1: lg    %2,0(%0,%4)\n"
		"   sllg  %0,%0,3\n"
		"   clr   %2,%1\n"
		"   jne   2f\n"
		"   aghi  %0,32\n"
		"   srlg  %2,%2,32\n"
		"2: lghi  %1,0xff\n"
		"   tmll  %2,0xffff\n"
		"   jno   3f\n"
		"   aghi  %0,16\n"
		"   srl   %2,16\n"
		"3: tmll  %2,0x00ff\n"
		"   jno   4f\n"
		"   aghi  %0,8\n"
		"   srl   %2,8\n"
		"4: ngr   %2,%1\n"
		"   ic    %2,0(%2,%5)\n"
		"   algr  %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   slgr  %1,%1\n"
		"   lgr   %2,%3\n"
		"   slgr  %0,%0\n"
		"   aghi  %2,63\n"
		"   srlg  %2,%2,6\n"
		"0: cg    %1,0(%0,%4)\n"
		"   jne   1f\n"
		"   aghi  %0,8\n"
		"   brct  %2,0b\n"
		"   lgr   %0,%3\n"
		"   j     5f\n"
		"1: lg    %2,0(%0,%4)\n"
		"   sllg  %0,%0,3\n"
		"   clr   %2,%1\n"
		"   jne   2f\n"
		"   aghi  %0,32\n"
		"   srlg  %2,%2,32\n"
		"2: lghi  %1,0xff\n"
		"   tmll  %2,0xffff\n"
		"   jnz   3f\n"
		"   aghi  %0,16\n"
		"   srl   %2,16\n"
		"3: tmll  %2,0x00ff\n"
		"   jnz   4f\n"
		"   aghi  %0,8\n"
		"   srl   %2,8\n"
		"4: ngr   %2,%1\n"
		"   ic    %2,0(%2,%5)\n"
		"   algr  %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit(const unsigned long * addr, unsigned long size,
		   unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(*p >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit(const unsigned long * addr, unsigned long size,
	      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs(*p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
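/*
 * Usage sketch (added for exposition; bitmap and NBITS are
 * hypothetical): find_first_bit/find_next_bit support the usual
 * "visit every set bit" loop, with size acting as the not-found
 * sentinel:
 *
 *	unsigned long i;
 *
 *	for (i = find_first_bit(bitmap, NBITS);
 *	     i < NBITS;
 *	     i = find_next_bit(bitmap, NBITS, i + 1))
 *		handle_bit(i);
 */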
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>

#ifdef __KERNEL__

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
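/*
 * Worked example (added for exposition): ext2 numbers bits in
 * little-endian byte order, so ext2 bit 0 is the LSB of the first
 * byte. The macros below translate with nr ^ (__BITOPS_WORDSIZE - 8);
 * on 64 bit that is nr ^ 56, so ext2 bit 0 becomes native bit 56,
 * whose byte offset is (56 ^ 56) >> 3 = 0, the first byte, as
 * required. The XOR flips only the byte index within the long; the
 * bit position inside the byte (nr & 7) is unchanged.
 */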
#define ext2_set_bit(nr, addr)       \
	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)

#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"   slr  %0,%0\n"
		"0: cl   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   ahi  %0,24\n"
		"   lhi  %1,0xff\n"
		"   tmh  %2,0xffff\n"
		"   jo   2f\n"
		"   ahi  %0,-16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0xff00\n"
		"   jo   3f\n"
		"   ahi  %0,-8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi  %1,-1\n"
		"   lgr   %2,%3\n"
		"   aghi  %2,63\n"
		"   srlg  %2,%2,6\n"
		"   slgr  %0,%0\n"
		"0: clg   %1,0(%0,%4)\n"
		"   jne   1f\n"
		"   aghi  %0,8\n"
		"   brct  %2,0b\n"
		"   lgr   %0,%3\n"
		"   j     5f\n"
		"1: cl    %1,0(%0,%4)\n"
		"   jne   2f\n"
		"   aghi  %0,4\n"
		"2: l     %2,0(%0,%4)\n"
		"   sllg  %0,%0,3\n"
		"   aghi  %0,24\n"
		"   lghi  %1,0xff\n"
		"   tmlh  %2,0xffff\n"
		"   jo    3f\n"
		"   aghi  %0,-16\n"
		"   srl   %2,16\n"
		"3: tmll  %2,0xff00\n"
		"   jo    4f\n"
		"   aghi  %0,-8\n"
		"   srl   %2,8\n"
		"4: ngr   %2,%1\n"
		"   ic    %2,0(%2,%5)\n"
		"   algr  %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long word, bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
#ifndef __s390x__
		asm("   ic   %0,0(%1)\n"
		    "   icm  %0,2,1(%1)\n"
		    "   icm  %0,4,2(%1)\n"
		    "   icm  %0,8,3(%1)"
		    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
		asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(word >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_zero_bit(p, size);
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */