/* bitops.h */
  1. /* bitops.h: bit operations for the Fujitsu FR-V CPUs
  2. *
  3. * For an explanation of how atomic ops work in this arch, see:
  4. * Documentation/fujitsu/frv/atomic-ops.txt
  5. *
  6. * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  7. * Written by David Howells (dhowells@redhat.com)
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #ifndef _ASM_BITOPS_H
  15. #define _ASM_BITOPS_H
  16. #include <linux/config.h>
  17. #include <linux/compiler.h>
  18. #include <asm/byteorder.h>
  19. #include <asm/system.h>
  20. #include <asm/atomic.h>
  21. #ifdef __KERNEL__
  22. /*
  23. * ffz = Find First Zero in word. Undefined if no zero exists,
  24. * so code should check against ~0UL first..
  25. */
  26. static inline unsigned long ffz(unsigned long word)
  27. {
  28. unsigned long result = 0;
  29. while (word & 1) {
  30. result++;
  31. word >>= 1;
  32. }
  33. return result;
  34. }
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * These two therefore expand to a pure compiler barrier so the compiler
 * cannot reorder memory accesses across a clear_bit() used in lock-like
 * constructs; no CPU memory-barrier instruction is emitted.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
  40. static inline int test_and_clear_bit(int nr, volatile void *addr)
  41. {
  42. volatile unsigned long *ptr = addr;
  43. unsigned long mask = 1UL << (nr & 31);
  44. ptr += nr >> 5;
  45. return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
  46. }
  47. static inline int test_and_set_bit(int nr, volatile void *addr)
  48. {
  49. volatile unsigned long *ptr = addr;
  50. unsigned long mask = 1UL << (nr & 31);
  51. ptr += nr >> 5;
  52. return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
  53. }
  54. static inline int test_and_change_bit(int nr, volatile void *addr)
  55. {
  56. volatile unsigned long *ptr = addr;
  57. unsigned long mask = 1UL << (nr & 31);
  58. ptr += nr >> 5;
  59. return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
  60. }
  61. static inline void clear_bit(int nr, volatile void *addr)
  62. {
  63. test_and_clear_bit(nr, addr);
  64. }
  65. static inline void set_bit(int nr, volatile void *addr)
  66. {
  67. test_and_set_bit(nr, addr);
  68. }
  69. static inline void change_bit(int nr, volatile void * addr)
  70. {
  71. test_and_change_bit(nr, addr);
  72. }
  73. static inline void __clear_bit(int nr, volatile void * addr)
  74. {
  75. volatile unsigned long *a = addr;
  76. int mask;
  77. a += nr >> 5;
  78. mask = 1 << (nr & 31);
  79. *a &= ~mask;
  80. }
  81. static inline void __set_bit(int nr, volatile void * addr)
  82. {
  83. volatile unsigned long *a = addr;
  84. int mask;
  85. a += nr >> 5;
  86. mask = 1 << (nr & 31);
  87. *a |= mask;
  88. }
  89. static inline void __change_bit(int nr, volatile void *addr)
  90. {
  91. volatile unsigned long *a = addr;
  92. int mask;
  93. a += nr >> 5;
  94. mask = 1 << (nr & 31);
  95. *a ^= mask;
  96. }
  97. static inline int __test_and_clear_bit(int nr, volatile void * addr)
  98. {
  99. volatile unsigned long *a = addr;
  100. int mask, retval;
  101. a += nr >> 5;
  102. mask = 1 << (nr & 31);
  103. retval = (mask & *a) != 0;
  104. *a &= ~mask;
  105. return retval;
  106. }
  107. static inline int __test_and_set_bit(int nr, volatile void * addr)
  108. {
  109. volatile unsigned long *a = addr;
  110. int mask, retval;
  111. a += nr >> 5;
  112. mask = 1 << (nr & 31);
  113. retval = (mask & *a) != 0;
  114. *a |= mask;
  115. return retval;
  116. }
  117. static inline int __test_and_change_bit(int nr, volatile void * addr)
  118. {
  119. volatile unsigned long *a = addr;
  120. int mask, retval;
  121. a += nr >> 5;
  122. mask = 1 << (nr & 31);
  123. retval = (mask & *a) != 0;
  124. *a ^= mask;
  125. return retval;
  126. }
  127. /*
  128. * This routine doesn't need to be atomic.
  129. */
  130. static inline int __constant_test_bit(int nr, const volatile void * addr)
  131. {
  132. return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  133. }
  134. static inline int __test_bit(int nr, const volatile void * addr)
  135. {
  136. int * a = (int *) addr;
  137. int mask;
  138. a += nr >> 5;
  139. mask = 1 << (nr & 0x1f);
  140. return ((mask & *a) != 0);
  141. }
/* test_bit(): route compile-time-constant bit numbers to the version the
 * compiler can fold; everything else goes through __test_bit(). */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

/* Out-of-line search for the next set bit at or after bit 'offset'. */
extern int find_next_bit(const unsigned long *addr, int size, int offset);

/* First set / first zero bit of the whole bitmap == search from bit 0. */
#define find_first_bit(addr, size) find_next_bit(addr, size, 0)

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
  150. static inline int find_next_zero_bit(const void *addr, int size, int offset)
  151. {
  152. const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
  153. unsigned long result = offset & ~31UL;
  154. unsigned long tmp;
  155. if (offset >= size)
  156. return size;
  157. size -= result;
  158. offset &= 31UL;
  159. if (offset) {
  160. tmp = *(p++);
  161. tmp |= ~0UL >> (32-offset);
  162. if (size < 32)
  163. goto found_first;
  164. if (~tmp)
  165. goto found_middle;
  166. size -= 32;
  167. result += 32;
  168. }
  169. while (size & ~31UL) {
  170. if (~(tmp = *(p++)))
  171. goto found_middle;
  172. result += 32;
  173. size -= 32;
  174. }
  175. if (!size)
  176. return result;
  177. tmp = *p;
  178. found_first:
  179. tmp |= ~0UL >> size;
  180. found_middle:
  181. return result + ffz(tmp);
  182. }
/* ffs: 1-based find-first-set via the kernel's generic helper
 * (NOTE(review): confirm generic_ffs(0) == 0 as callers expect). */
#define ffs(x) generic_ffs(x)
/* __ffs: 0-based variant; undefined for x == 0 (would yield ffs(0) - 1). */
#define __ffs(x) (ffs(x) - 1)
/*
 * fls: find last bit set.
 * Uses the FR-V 'scan' instruction, then converts its result so that
 * fls(0) == 0 and fls(1 << 31) == 32.
 * NOTE(review): assumes 'scan x,gr0' yields 0 for x == 0 and
 * (33 - fls(x)) otherwise — confirm against the FR-V ISA manual.
 */
#define fls(x) \
({ \
	int bit; \
	\
	asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x)); \
	\
	bit ? 33 - bit : bit; \
})
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	/* Check each 32-bit word in turn, lowest first; the unlikely()
	 * hints push the early (rarely-set) words off the fast path.
	 * NOTE(review): the unconditional __ffs(b[4]) fallback is only
	 * meaningful if some bit in the map is set — the "cleared" wording
	 * above looks inherited; confirm the invariant is "at least one
	 * bit set". */
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

/* ext2 bitmaps are little-endian; XORing the bit number with 0x18 flips
 * the byte index within each 32-bit word, converting an ext2 bit number
 * to this CPU's native numbering (NOTE(review): assumes a big-endian
 * CPU — consistent with the __swab32() use in ext2_find_next_zero_bit
 * below, but confirm). */
#define ext2_set_bit(nr, addr)		test_and_set_bit  ((nr) ^ 0x18, (addr))
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, (addr))

/* The underlying ops are already atomic, so the lock argument is unused. */
#define ext2_set_bit_atomic(lock,nr,addr)	ext2_set_bit((nr), addr)
#define ext2_clear_bit_atomic(lock,nr,addr)	ext2_clear_bit((nr), addr)
  225. static inline int ext2_test_bit(int nr, const volatile void * addr)
  226. {
  227. const volatile unsigned char *ADDR = (const unsigned char *) addr;
  228. int mask;
  229. ADDR += nr >> 3;
  230. mask = 1 << (nr & 0x07);
  231. return ((mask & *ADDR) != 0);
  232. }
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

/*
 * Find the next zero bit in a little-endian (ext2-format) bitmap.
 * @addr: base of the bitmap
 * @size: bitmap length in bits
 * @offset: bit number at which to start searching
 * Returns 'size' immediately when offset >= size; otherwise the number
 * of the first zero bit found at or after 'offset'.
 */
static inline unsigned long ext2_find_next_zero_bit(const void *addr,
						    unsigned long size,
						    unsigned long offset)
{
	const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* scan whole 32-bit words; any word with a zero bit ends the loop */
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
/* Bitmap functions for the minix filesystem. */
/* minix bitmaps share the ext2 little-endian layout on this arch, so
 * they simply reuse the ext2 helpers above. */
#define minix_test_and_set_bit(nr,addr)		ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			ext2_set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	ext2_find_first_zero_bit(addr,size)
  290. #endif /* __KERNEL__ */
  291. #endif /* _ASM_BITOPS_H */