#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
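
/*
 * Editor's illustration (not part of the original header, assumes the
 * layout described above): bit nr lands in byte (nr / 8) counted from
 * the END of the containing big-endian word, at in-byte position
 * (nr % 8). The hypothetical helper below shows the byte a 32-bit
 * operation touches:
 *
 *	unsigned char *bit_byte_sketch(unsigned long *addr, unsigned long nr)
 *	{
 *		unsigned char *base = (unsigned char *) addr + (nr / 32) * 4;
 *		return base + 3 - ((nr % 32) / 8);  // big endian: LSB byte last
 *	}
 *
 * With this layout "flags |= 1UL << 5" and "set_bit(5, &flags)" hit the
 * same bit in memory, which is exactly the property the comment above
 * is after.
 */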

/*
 * Set ALIGN_CS to 1 if the SMP-safe bit operations should align the
 * address to a 4 byte (31 bit) or 8 byte (64 bit) boundary.
 * It seems to work without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/s390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
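
/*
 * Editor's sketch of what __BITOPS_LOOP computes, in portable C. This is
 * illustrative only: the point of the real macro is that CS/CSG performs
 * the compare-and-store as one atomic step. __sync_val_compare_and_swap
 * is the GCC builtin standing in for the CS instruction here:
 *
 *	static inline void bitops_or_loop_sketch(unsigned long *addr,
 *						 unsigned long val)
 *	{
 *		unsigned long old, new, prev;
 *
 *		old = *addr;			// "l/lg  %0,0(%4)"
 *		for (;;) {
 *			new = old | val;	// __BITOPS_OR; AND/XOR analogous
 *			prev = __sync_val_compare_and_swap(addr, old, new);
 *			if (prev == old)	// CS stored new: done
 *				break;
 *			old = prev;		// *addr changed under us: retry ("jl 0b")
 *		}
 *	}
 */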

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
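
/*
 * Worked example (editor's illustration): nr = 70 on a 64-bit kernel.
 * nr & 63 == 6, so mask = 1UL << 6; nr ^ (nr & 63) == 64, and 64 >> 3
 * is 8, so CS operates on the quadword at ptr + 1. In general
 * (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3 is just the word index
 * scaled to bytes, i.e. (nr / __BITOPS_WORDSIZE) * sizeof(long).
 */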

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
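
/*
 * Usage sketch (editor's illustration; 'flags' is a hypothetical bitmap,
 * not part of this header):
 *
 *	static unsigned long flags[__BITOPS_WORDS(64)];
 *
 *	if (!test_and_set_bit_cs(12, flags)) {
 *		// bit 12 was clear and is now set: this CPU won the race
 *	}
 *
 * The __BITOPS_BARRIER() issued after the CS loop in the test_and_*
 * variants keeps the compiler from moving memory accesses across the
 * returned test.
 */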

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
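
/*
 * Editor's note: the __builtin_constant_p dispatch lets gcc emit a single
 * OI instruction with an immediate mask whenever the bit number is known
 * at compile time. For example (flags and n are hypothetical):
 *
 *	set_bit_simple(3, flags);	// constant: folds to "oi 0(%rX),0x08"
 *	set_bit_simple(n, flags);	// variable: takes the oc/_oi_bitmap path
 */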

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr&7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7)));
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}

/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}
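
/*
 * Editor's illustration of the intended semantics (assumes the findmap
 * tables behave as described):
 *
 *	ffz(0x0000ffff) == 16;    // first ZERO bit, counting from the LSB
 *	__ffs(0x00010000) == 16;  // first ONE bit, counting from the LSB
 *
 * Both routines narrow the search halfword by halfword, then byte by
 * byte, and finish with one table lookup on the remaining byte, so the
 * cost is a handful of compares instead of a 32/64-iteration loop.
 */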

/*
 * Find-bit routines.
 */
#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jno  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jno  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   slr  %1,%1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jnz  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jnz  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,8(%0)\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jno  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jno  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   slgr %1,%1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jnz  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jnz  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit (const unsigned long * addr, unsigned long size,
		    unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(*p >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit (const unsigned long * addr, unsigned long size,
	       unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs(*p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
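
/*
 * Typical usage (editor's sketch; 'bitmap', NBITS and handle_bit are
 * hypothetical, not part of this header):
 *
 *	unsigned long bitmap[__BITOPS_WORDS(NBITS)];
 *	unsigned long i;
 *
 *	for (i = find_first_bit(bitmap, NBITS);
 *	     i < NBITS;
 *	     i = find_next_bit(bitmap, NBITS, i + 1))
 *		handle_bit(i);
 *
 * Both routines return 'size' when nothing is found, which is what
 * makes the 'i < NBITS' termination test work.
 */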

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight64(x)						\
({								\
	unsigned long __x = (x);				\
	unsigned int __w;					\
	__w = generic_hweight32((unsigned int) __x);		\
	__w += generic_hweight32((unsigned int) (__x>>32));	\
	__w;							\
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
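
/*
 * Editor's sketch of a 32-bit hamming weight, as a reference model for
 * what the generic_hweight32 fallback computes (not necessarily the
 * kernel's exact implementation): the classic parallel popcount.
 *
 *	static inline unsigned int popcount32_sketch(unsigned int w)
 *	{
 *		w = w - ((w >> 1) & 0x55555555);		// 2-bit sums
 *		w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	// 4-bit sums
 *		w = (w + (w >> 4)) & 0x0f0f0f0f;		// 8-bit sums
 *		return (w * 0x01010101) >> 24;			// add the bytes
 *	}
 *
 * hweight64 above simply adds the weights of the two 32-bit halves.
 */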

#ifdef __KERNEL__

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
#define ext2_set_bit(nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
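
/*
 * Worked example (editor's illustration): on a 64-bit kernel,
 * __BITOPS_WORDSIZE - 8 == 56, so ext2_set_bit(0, addr) becomes
 * test_and_set_bit(0 ^ 56, addr) == test_and_set_bit(56, addr).
 * Native bit 56 lives at bit 0 of the most significant byte of the
 * quadword, i.e. bit 0 of the FIRST byte in big-endian memory, which is
 * exactly where a little-endian machine keeps ext2 bit 0. The XOR swaps
 * the byte order of the bit number while preserving the bit-in-byte
 * position (on 31-bit kernels the constant is 24 and the same argument
 * applies wordwise).
 */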

#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"   slr  %0,%0\n"
		"0: cl   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   ahi  %0,24\n"
		"   lhi  %1,0xff\n"
		"   tmh  %2,0xffff\n"
		"   jo   2f\n"
		"   ahi  %0,-16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0xff00\n"
		"   jo   3f\n"
		"   ahi  %0,-8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"   slgr %0,%0\n"
		"0: clg  %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: cl   %1,0(%0,%4)\n"
		"   jne  2f\n"
		"   aghi %0,4\n"
		"2: l    %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   aghi %0,24\n"
		"   lghi %1,0xff\n"
		"   tmlh %2,0xffff\n"
		"   jo   3f\n"
		"   aghi %0,-16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0xff00\n"
		"   jo   4f\n"
		"   aghi %0,-8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long word, bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
#ifndef __s390x__
		asm("   ic   %0,0(%1)\n"
		    "   icm  %0,2,1(%1)\n"
		    "   icm  %0,4,2(%1)\n"
		    "   icm  %0,8,3(%1)"
		    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
		asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(word >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_zero_bit(p, size);
}

/* Bitmap functions for the minix filesystem. */
/* FIXME !!! */
#define minix_test_and_set_bit(nr,addr) \
	test_and_set_bit(nr,(unsigned long *)addr)
#define minix_set_bit(nr,addr) \
	set_bit(nr,(unsigned long *)addr)
#define minix_test_and_clear_bit(nr,addr) \
	test_and_clear_bit(nr,(unsigned long *)addr)
#define minix_test_bit(nr,addr) \
	test_bit(nr,(unsigned long *)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */