/*
 * include/asm-v850/bitops.h -- Bit operations
 *
 * Copyright (C) 2001,02,03,04,05  NEC Electronics Corporation
 * Copyright (C) 2001,02,03,04,05  Miles Bader <miles@gnu.org>
 * Copyright (C) 1992  Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 */

#ifndef __V850_BITOPS_H__
#define __V850_BITOPS_H__

#include <linux/config.h>
#include <linux/compiler.h>	/* unlikely */
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* interrupt enable/disable */

#ifdef __KERNEL__

/*
 * The __ functions are not atomic
 */

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
  26. extern __inline__ unsigned long ffz (unsigned long word)
  27. {
  28. unsigned long result = 0;
  29. while (word & 1) {
  30. result++;
  31. word >>= 1;
  32. }
  33. return result;
  34. }
/* In the following constant-bit-op macros, a "g" constraint is used when
   we really need an integer ("i" constraint).  This is to avoid
   warnings/errors from the compiler in the case where the associated
   operand _isn't_ an integer, and shouldn't produce bogus assembly because
   use of that form is protected by a guard statement that checks for
   constants, and should otherwise be removed by the optimizer.  This
   _usually_ works -- however, __builtin_constant_p returns true for a
   variable with a known constant value too, and unfortunately gcc will
   happily put the variable in a register and use the register for the "g"
   constraint'd asm operand.  To avoid the latter problem, we add a
   constant offset to the operand and subtract it back in the asm code;
   forcing gcc to do arithmetic on the value is usually enough to get it
   to use a real constant value.  This is horrible, and ultimately
   unreliable too, but it seems to work for now (hopefully gcc will offer
   us more control in the future, so we can do a better job).  */

/* Apply bit instruction OP ("set1"/"clr1"/"not1") to bit (NR & 7) of the
   byte at ((char *)ADDR + (NR >> 3)), for a compile-time-constant NR.
   The `+ 0x123' here / `- 0x123' in the asm is the constant-forcing hack
   described above.  */
#define __const_bit_op(op, nr, addr)					\
  ({ __asm__ (op " (%0 - 0x123), %1"					\
	      :: "g" (((nr) & 0x7) + 0x123),				\
		 "m" (*((char *)(addr) + ((nr) >> 3)))			\
	      : "memory"); })

/* Same operation for a run-time NR: bit number and byte address are
   computed into registers first.  */
#define __var_bit_op(op, nr, addr)					\
  ({ int __nr = (nr);							\
     __asm__ (op " %0, [%1]"						\
	      :: "r" (__nr & 0x7),					\
		 "r" ((char *)(addr) + (__nr >> 3))			\
	      : "memory"); })

/* Dispatch to the constant or variable form.  NOTE(review): the 0x7FFFF
   bound presumably keeps the byte displacement encodable by the
   constant-form instruction -- confirm against the V850 ISA manual.  */
#define __bit_op(op, nr, addr)						\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_bit_op (op, nr, addr)					\
   : __var_bit_op (op, nr, addr))

#define __set_bit(nr, addr)		__bit_op ("set1", nr, addr)
#define __clear_bit(nr, addr)		__bit_op ("clr1", nr, addr)
#define __change_bit(nr, addr)		__bit_op ("not1", nr, addr)

/* The bit instructions used by `non-atomic' variants are actually atomic. */
#define set_bit				__set_bit
#define clear_bit			__clear_bit
#define change_bit			__change_bit
/* Test-and-modify, constant-NR form: `tst1' latches the bit's current
   value into the Z flag, `setf nz' copies the old bit value (0/1) into
   the result register, then OP modifies the bit.  Uses the same 0x123
   constant-forcing hack as __const_bit_op.  */
#define __const_tns_bit_op(op, nr, addr)				      \
  ({ int __tns_res;							      \
     __asm__ __volatile__ (						      \
	     "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2"    \
	     : "=&r" (__tns_res)					      \
	     : "g" (((nr) & 0x7) + 0x123),				      \
	       "m" (*((char *)(addr) + ((nr) >> 3)))			      \
	     : "memory");						      \
     __tns_res;								      \
  })

/* Test-and-modify, run-time-NR form.  */
#define __var_tns_bit_op(op, nr, addr)					      \
  ({ int __nr = (nr);							      \
     int __tns_res;							      \
     __asm__ __volatile__ (						      \
	     "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]"		      \
	     : "=&r" (__tns_res)					      \
	     : "r" (__nr & 0x7),					      \
	       "r" ((char *)(addr) + (__nr >> 3))			      \
	     : "memory");						      \
     __tns_res;								      \
  })

/* Dispatch between the two forms.  This sequence is NOT interrupt-safe:
   the bit can change between the `tst1' and the modifying instruction.  */
#define __tns_bit_op(op, nr, addr)					      \
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		      \
   ? __const_tns_bit_op (op, nr, addr)					      \
   : __var_tns_bit_op (op, nr, addr))

/* Atomic wrapper: mask interrupts around the test-and-modify sequence.
   NOTE(review): IRQ masking alone implies a uniprocessor assumption --
   no SMP handling is visible in this file; confirm.  */
#define __tns_atomic_bit_op(op, nr, addr)				      \
  ({ int __tns_atomic_res, __tns_atomic_flags;				      \
     local_irq_save (__tns_atomic_flags);				      \
     __tns_atomic_res = __tns_bit_op (op, nr, addr);			      \
     local_irq_restore (__tns_atomic_flags);				      \
     __tns_atomic_res;							      \
  })

/* Return the old value of bit NR and set/clear/toggle it.  */
#define __test_and_set_bit(nr, addr)	__tns_bit_op ("set1", nr, addr)
#define test_and_set_bit(nr, addr)	__tns_atomic_bit_op ("set1", nr, addr)
#define __test_and_clear_bit(nr, addr)	__tns_bit_op ("clr1", nr, addr)
#define test_and_clear_bit(nr, addr)	__tns_atomic_bit_op ("clr1", nr, addr)
#define __test_and_change_bit(nr, addr)	__tns_bit_op ("not1", nr, addr)
#define test_and_change_bit(nr, addr)	__tns_atomic_bit_op ("not1", nr, addr)
/* Read bit NR of the byte array at ADDR, constant-NR form: `tst1' sets
   the Z flag from the bit and `setf nz' turns that into 0/1.  Same 0x123
   constant-forcing hack as __const_bit_op.  */
#define __const_test_bit(nr, addr)					      \
  ({ int __test_bit_res;						      \
     __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0"		      \
			   : "=r" (__test_bit_res)			      \
			   : "g" (((nr) & 0x7) + 0x123),		      \
			     "m" (*((const char *)(addr) + ((nr) >> 3))));    \
     __test_bit_res;							      \
  })
/* Read bit NR of the bitmap at ADDR (run-time-NR form); returns 0 or 1.
   Note: `addr + (nr >> 3)' is arithmetic on a void pointer -- a GNU C
   extension treating it as byte-granular.  */
extern __inline__ int __test_bit (int nr, const void *addr)
{
	int res;
	__asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
			      : "=r" (res)
			      : "r" (nr & 0x7), "r" (addr + (nr >> 3)));
	return res;
}
/* test_bit -- dispatch between the constant- and variable-NR bit reads.  */
#define test_bit(nr,addr)						      \
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		      \
   ? __const_test_bit ((nr), (addr))					      \
   : __test_bit ((nr), (addr)))

/* clear_bit doesn't provide any barrier for the compiler. */
#define smp_mb__before_clear_bit()	barrier ()
#define smp_mb__after_clear_bit()	barrier ()

/* Find the first zero bit in a SIZE-bit bitmap.  */
#define find_first_zero_bit(addr, size) \
  find_next_zero_bit ((addr), (size), 0)
  135. extern __inline__ int find_next_zero_bit(const void *addr, int size, int offset)
  136. {
  137. unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
  138. unsigned long result = offset & ~31UL;
  139. unsigned long tmp;
  140. if (offset >= size)
  141. return size;
  142. size -= result;
  143. offset &= 31UL;
  144. if (offset) {
  145. tmp = * (p++);
  146. tmp |= ~0UL >> (32-offset);
  147. if (size < 32)
  148. goto found_first;
  149. if (~tmp)
  150. goto found_middle;
  151. size -= 32;
  152. result += 32;
  153. }
  154. while (size & ~31UL) {
  155. if (~ (tmp = * (p++)))
  156. goto found_middle;
  157. result += 32;
  158. size -= 32;
  159. }
  160. if (!size)
  161. return result;
  162. tmp = *p;
  163. found_first:
  164. tmp |= ~0UL >> size;
  165. found_middle:
  166. return result + ffz (tmp);
  167. }
  168. /* This is the same as generic_ffs, but we can't use that because it's
  169. inline and the #include order mucks things up. */
  170. static inline int generic_ffs_for_find_next_bit(int x)
  171. {
  172. int r = 1;
  173. if (!x)
  174. return 0;
  175. if (!(x & 0xffff)) {
  176. x >>= 16;
  177. r += 16;
  178. }
  179. if (!(x & 0xff)) {
  180. x >>= 8;
  181. r += 8;
  182. }
  183. if (!(x & 0xf)) {
  184. x >>= 4;
  185. r += 4;
  186. }
  187. if (!(x & 3)) {
  188. x >>= 2;
  189. r += 2;
  190. }
  191. if (!(x & 1)) {
  192. x >>= 1;
  193. r += 1;
  194. }
  195. return r;
  196. }
  197. /*
  198. * Find next one bit in a bitmap reasonably efficiently.
  199. */
  200. static __inline__ unsigned long find_next_bit(const unsigned long *addr,
  201. unsigned long size, unsigned long offset)
  202. {
  203. unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
  204. unsigned int result = offset & ~31UL;
  205. unsigned int tmp;
  206. if (offset >= size)
  207. return size;
  208. size -= result;
  209. offset &= 31UL;
  210. if (offset) {
  211. tmp = *p++;
  212. tmp &= ~0UL << offset;
  213. if (size < 32)
  214. goto found_first;
  215. if (tmp)
  216. goto found_middle;
  217. size -= 32;
  218. result += 32;
  219. }
  220. while (size >= 32) {
  221. if ((tmp = *p++) != 0)
  222. goto found_middle;
  223. result += 32;
  224. size -= 32;
  225. }
  226. if (!size)
  227. return result;
  228. tmp = *p;
  229. found_first:
  230. tmp &= ~0UL >> (32 - size);
  231. if (tmp == 0UL) /* Are any bits set? */
  232. return result + size; /* Nope. */
  233. found_middle:
  234. return result + generic_ffs_for_find_next_bit(tmp);
  235. }
/*
 * find_first_bit - find the first set bit in a memory region
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

/* 1-based find-first-set / find-last-set from <linux/bitops.h>.  */
#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
/* NOTE(review): the kernel-wide __ffs contract is 0-based (ffs minus
   one); defining it as plain ffs looks off by one -- verify against
   callers before relying on it.  */
#define __ffs(x) ffs(x)
  244. /*
  245. * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
  246. * that at least one bit is set, and returns the real index of the bit
  247. * (rather than the bit index + 1, like ffs does).
  248. */
  249. static inline int sched_ffs(int x)
  250. {
  251. int r = 0;
  252. if (!(x & 0xffff)) {
  253. x >>= 16;
  254. r += 16;
  255. }
  256. if (!(x & 0xff)) {
  257. x >>= 8;
  258. r += 8;
  259. }
  260. if (!(x & 0xf)) {
  261. x >>= 4;
  262. r += 4;
  263. }
  264. if (!(x & 3)) {
  265. x >>= 2;
  266. r += 2;
  267. }
  268. if (!(x & 1)) {
  269. x >>= 1;
  270. r += 1;
  271. }
  272. return r;
  273. }
  274. /*
  275. * Every architecture must define this function. It's the fastest
  276. * way of searching a 140-bit bitmap where the first 100 bits are
  277. * unlikely to be set. It's guaranteed that at least one of the 140
  278. * bits is set.
  279. */
  280. static inline int sched_find_first_bit(unsigned long *b)
  281. {
  282. unsigned offs = 0;
  283. while (! *b) {
  284. b++;
  285. offs += 32;
  286. }
  287. return sched_ffs (*b) + offs;
  288. }
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32 (x)
#define hweight16(x) generic_hweight16 (x)
#define hweight8(x) generic_hweight8 (x)

/* Bitmap functions for the ext2 filesystem, mapped straight onto the
   ordinary bit ops (all ops here address bytes via (nr)>>3, which
   presumably matches ext2's little-endian bit layout on this CPU --
   verify).  The `_atomic' variants ignore the lock argument L because
   the underlying operations are already atomic.  */
#define ext2_set_bit test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit test_and_set_bit
#define minix_set_bit set_bit
#define minix_test_and_clear_bit test_and_clear_bit
#define minix_test_bit test_bit
#define minix_find_first_zero_bit find_first_zero_bit

#endif /* __KERNEL__ */

#endif /* __V850_BITOPS_H__ */