#ifndef __ASM_SH64_BITOPS_H
#define __ASM_SH64_BITOPS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/bitops.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>

static __inline__ void set_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a |= mask;
        local_irq_restore(flags);
}

static inline void __set_bit(int nr, void *addr)
{
        int mask;
        unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}
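
/*
 * Example (editor's sketch; the names below are hypothetical): set_bit()
 * is safe against concurrent updates of the same word, while __set_bit()
 * is for bitmaps the caller already owns or has locked:
 *
 *      set_bit(DEV_RUNNING, &dev->flags);      (may race with other CPUs)
 *      __set_bit(i, local_map);                (local_map is private here)
 */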

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

static inline void clear_bit(int nr, volatile unsigned long *a)
{
        int mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a &= ~mask;
        local_irq_restore(flags);
}
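
/*
 * Example (editor's sketch; the flag name is hypothetical): the usual
 * pattern for making earlier stores visible before a status bit is
 * cleared:
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(DEV_BUSY, &dev->flags);
 */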

static inline void __clear_bit(int nr, volatile unsigned long *a)
{
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a ^= mask;
        local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void *addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        local_irq_restore(flags);

        return retval;
}
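
/*
 * Example (editor's sketch; names hypothetical): the classic try-lock
 * idiom, since test_and_set_bit() reports the old value atomically:
 *
 *      if (test_and_set_bit(RESOURCE_BUSY, &dev->flags))
 *              return -EBUSY;          (someone else already owns it)
 */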

static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}
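
/*
 * Example (editor's note): bit numbers index across 32-bit words, so
 * test_bit(33, addr) reads bit 1 of the second word, i.e.
 * (((const volatile unsigned int *) addr)[1] >> 1) & 1.
 */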

/*
 * ffz - find the first zero bit in @word.
 *
 * The result is undefined if no zero bit exists.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result, __d2, __d3;

        __asm__("gettr  tr0, %2\n\t"
                "pta    $+32, tr0\n\t"
                "andi   %1, 1, %3\n\t"
                "beq    %3, r63, tr0\n\t"
                "pta    $+4, tr0\n"
                "0:\n\t"
                "shlri.l        %1, 1, %1\n\t"
                "addi   %0, 1, %0\n\t"
                "andi   %1, 1, %3\n\t"
                "beqi   %3, 1, tr0\n"
                "1:\n\t"
                "ptabs  %2, tr0\n\t"
                : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
                : "0" (0L), "1" (word));

        return result;
}
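
/*
 * Example (editor's note): ffz() counts the trailing one bits, so
 * ffz(0x0000000f) == 4 and ffz(0) == 0; the result is undefined for
 * an all-ones word.
 */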

/**
 * __ffs - find first bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        int r = 0;

        if (!word)
                return 0;
        if (!(word & 0xffff)) {
                word >>= 16;
                r += 16;
        }
        if (!(word & 0xff)) {
                word >>= 8;
                r += 8;
        }
        if (!(word & 0xf)) {
                word >>= 4;
                r += 4;
        }
        if (!(word & 3)) {
                word >>= 2;
                r += 2;
        }
        if (!(word & 1)) {
                word >>= 1;
                r += 1;
        }

        return r;
}
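
/*
 * Example (editor's note): __ffs() returns the index of the lowest set
 * bit, so __ffs(0x18) == 3; by the check above, __ffs(0) returns 0,
 * which is indistinguishable from bit 0 being set, hence the warning.
 */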

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
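
/*
 * Example (editor's sketch; 'map', 'nbits' and do_something() are
 * hypothetical): the usual loop for visiting every set bit:
 *
 *      for (bit = find_first_bit(map, nbits); bit < nbits;
 *           bit = find_next_bit(map, nbits, bit + 1))
 *              do_something(bit);
 */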

static inline int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32 - offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
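
/*
 * Example (editor's sketch; names hypothetical): claiming the first free
 * slot in a bitmap that the caller has already locked:
 *
 *      slot = find_first_zero_bit(map, nslots);
 *      if (slot < nslots)
 *              __set_bit(slot, map);
 */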

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x)    generic_hweight32(x)
#define hweight16(x)    generic_hweight16(x)
#define hweight8(x)     generic_hweight8(x)
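
/*
 * Example (editor's note): hweight32(0xf0f0f0f0) == 16, since half of
 * the 32 bits are set.
 */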

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;

        return __ffs(b[4]) + 128;
}
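
/*
 * Example (editor's note): with b[] = { 0, 0, 0, 0x8, 0 }, the first
 * three words are empty and __ffs(0x8) == 3, so the function returns
 * 3 + 96 == 99.
 */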

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x)  generic_ffs(x)

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit(nr, addr)          test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr)         test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
        find_next_zero_bit((addr), (size), (offset))
#else
static __inline__ int ext2_set_bit(int nr, volatile void *addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
{
        int mask;
        const volatile unsigned char *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
#endif /* __LITTLE_ENDIAN__ */

#define ext2_set_bit_atomic(lock, nr, addr)             \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_set_bit((nr), (addr));               \
        spin_unlock(lock);                              \
        ret;                                            \
})

#define ext2_clear_bit_atomic(lock, nr, addr)           \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_clear_bit((nr), (addr));             \
        spin_unlock(lock);                              \
        ret;                                            \
})
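
/*
 * Example (editor's sketch; the lock and buffer names are hypothetical):
 * how a filesystem might atomically claim block 'blk' in an on-disk
 * bitmap held in a buffer:
 *
 *      if (ext2_set_bit_atomic(&bitmap_lock, blk, bh->b_data))
 *              goto already_allocated;         (bit was already set)
 */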

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr, addr)        test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)                 set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)      test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)                test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)   find_first_zero_bit(addr, size)

#define fls(x)          generic_fls(x)
#define fls64(x)        generic_fls64(x)

#endif /* __KERNEL__ */
#endif /* __ASM_SH64_BITOPS_H */