/* bitops.h */
/* bitops.h: bit operations for the Fujitsu FR-V CPUs
 *
 * For an explanation of how atomic ops work in this arch, see:
 * Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/atomic.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ffz.h>
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 *
 * Callers that need memory ordering around clear_bit() must therefore
 * place these explicit compiler barriers themselves.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
  27. static inline int test_and_clear_bit(int nr, volatile void *addr)
  28. {
  29. volatile unsigned long *ptr = addr;
  30. unsigned long mask = 1UL << (nr & 31);
  31. ptr += nr >> 5;
  32. return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
  33. }
  34. static inline int test_and_set_bit(int nr, volatile void *addr)
  35. {
  36. volatile unsigned long *ptr = addr;
  37. unsigned long mask = 1UL << (nr & 31);
  38. ptr += nr >> 5;
  39. return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
  40. }
  41. static inline int test_and_change_bit(int nr, volatile void *addr)
  42. {
  43. volatile unsigned long *ptr = addr;
  44. unsigned long mask = 1UL << (nr & 31);
  45. ptr += nr >> 5;
  46. return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
  47. }
  48. static inline void clear_bit(int nr, volatile void *addr)
  49. {
  50. test_and_clear_bit(nr, addr);
  51. }
  52. static inline void set_bit(int nr, volatile void *addr)
  53. {
  54. test_and_set_bit(nr, addr);
  55. }
  56. static inline void change_bit(int nr, volatile void * addr)
  57. {
  58. test_and_change_bit(nr, addr);
  59. }
  60. static inline void __clear_bit(int nr, volatile void * addr)
  61. {
  62. volatile unsigned long *a = addr;
  63. int mask;
  64. a += nr >> 5;
  65. mask = 1 << (nr & 31);
  66. *a &= ~mask;
  67. }
  68. static inline void __set_bit(int nr, volatile void * addr)
  69. {
  70. volatile unsigned long *a = addr;
  71. int mask;
  72. a += nr >> 5;
  73. mask = 1 << (nr & 31);
  74. *a |= mask;
  75. }
  76. static inline void __change_bit(int nr, volatile void *addr)
  77. {
  78. volatile unsigned long *a = addr;
  79. int mask;
  80. a += nr >> 5;
  81. mask = 1 << (nr & 31);
  82. *a ^= mask;
  83. }
  84. static inline int __test_and_clear_bit(int nr, volatile void * addr)
  85. {
  86. volatile unsigned long *a = addr;
  87. int mask, retval;
  88. a += nr >> 5;
  89. mask = 1 << (nr & 31);
  90. retval = (mask & *a) != 0;
  91. *a &= ~mask;
  92. return retval;
  93. }
  94. static inline int __test_and_set_bit(int nr, volatile void * addr)
  95. {
  96. volatile unsigned long *a = addr;
  97. int mask, retval;
  98. a += nr >> 5;
  99. mask = 1 << (nr & 31);
  100. retval = (mask & *a) != 0;
  101. *a |= mask;
  102. return retval;
  103. }
  104. static inline int __test_and_change_bit(int nr, volatile void * addr)
  105. {
  106. volatile unsigned long *a = addr;
  107. int mask, retval;
  108. a += nr >> 5;
  109. mask = 1 << (nr & 31);
  110. retval = (mask & *a) != 0;
  111. *a ^= mask;
  112. return retval;
  113. }
  114. /*
  115. * This routine doesn't need to be atomic.
  116. */
  117. static inline int __constant_test_bit(int nr, const volatile void * addr)
  118. {
  119. return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  120. }
  121. static inline int __test_bit(int nr, const volatile void * addr)
  122. {
  123. int * a = (int *) addr;
  124. int mask;
  125. a += nr >> 5;
  126. mask = 1 << (nr & 0x1f);
  127. return ((mask & *a) != 0);
  128. }
/*
 * test_bit() - dispatch to __constant_test_bit() when the bit number is
 * a compile-time constant, otherwise to __test_bit().  Not atomic.
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
#include <asm-generic/bitops/find.h>
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs:
 * - return 32..1 to indicate bit 31..0 most significant bit set
 * - return 0 to indicate no bits set
 *
 * Implemented with FR-V conditionally-executed instructions: subcc
 * tests @x against zero into icc0, ckne latches "x != 0" into cc4, and
 * the cscan/csub sequence then either computes 32 minus the scan result
 * (x non-zero) or forces the result to 0 (x zero).  NOTE(review): exact
 * cscan semantics per the FR-V ISA manual — do not reorder these
 * instructions.
 */
#define fls(x)						\
({							\
	int bit;					\
							\
	asm("	subcc	%1,gr0,gr0,icc0		\n"	\
	    "	ckne	icc0,cc4		\n"	\
	    "	cscan.p	%1,gr0,%0	,cc4,#1	\n"	\
	    "	csub	%0,%0,%0	,cc4,#0	\n"	\
	    "	csub	%2,%0,%0	,cc4,#1	\n"	\
	    : "=&r"(bit)				\
	    : "r"(x), "r"(32)				\
	    : "icc0", "cc4"				\
	    );						\
							\
	bit;						\
})
/**
 * fls64 - find last bit set in a 64-bit value
 * @n: the value to search
 *
 * This is defined the same way as ffs:
 * - return 64..1 to indicate bit 63..0 most significant bit set
 * - return 0 to indicate no bits set
 */
static inline __attribute__((const))
int fls64(u64 n)
{
	/* Split the 64-bit value into 32-bit halves so each can be
	 * tested with a 32-bit scan.  NOTE(review): the h-before-l field
	 * order presumably matches this CPU's big-endian word order —
	 * confirm against asm/byteorder.h.
	 */
	union {
		u64 ll;
		struct { u32 h, l; };
	} _;
	int bit, x, y;

	_.ll = n;

	/* Both halves are tested against zero (icc0 = high, icc1 = low);
	 * the cc4/cc5/cc6 combinations then select one of three results:
	 * 0 when both halves are zero, 64 - scan(high) when the high
	 * half is non-zero, or 32 - scan(low) otherwise.  The .p suffix
	 * pairs instructions; do not reorder this sequence.
	 */
	asm("	subcc.p		%3,gr0,gr0,icc0		\n"
	    "	subcc		%4,gr0,gr0,icc1		\n"
	    "	ckne		icc0,cc4		\n"
	    "	ckne		icc1,cc5		\n"
	    "	norcr		cc4,cc5,cc6		\n"
	    "	csub.p		%0,%0,%0	,cc6,1	\n"
	    "	orcr		cc5,cc4,cc4		\n"
	    "	andcr		cc4,cc5,cc4		\n"
	    "	cscan.p		%3,gr0,%0	,cc4,0	\n"
	    "	setlos		#64,%1			\n"
	    "	cscan.p		%4,gr0,%0	,cc4,1	\n"
	    "	setlos		#32,%2			\n"
	    "	csub.p		%1,%0,%0	,cc4,0	\n"
	    "	csub		%2,%0,%0	,cc4,1	\n"
	    : "=&r"(bit), "=r"(x), "=r"(y)
	    : "0r"(_.h), "r"(_.l)
	    : "icc0", "icc1", "cc4", "cc5", "cc6"
	    );
	return bit;
}
  195. /**
  196. * ffs - find first bit set
  197. * @x: the word to search
  198. *
  199. * - return 32..1 to indicate bit 31..0 most least significant bit set
  200. * - return 0 to indicate no bits set
  201. */
  202. static inline __attribute__((const))
  203. int ffs(int x)
  204. {
  205. /* Note: (x & -x) gives us a mask that is the least significant
  206. * (rightmost) 1-bit of the value in x.
  207. */
  208. return fls(x & -x);
  209. }
/**
 * __ffs - find first bit set
 * @x: the word to search
 *
 * - return 31..0 to indicate bit 31..0 least significant bit set
 * - if no bits are set in x, the result is undefined
 */
static inline __attribute__((const))
int __ffs(unsigned long x)
{
	int bit;

	/* (x & -x) isolates the lowest set bit; the FR-V scan
	 * instruction yields a value converted to the bit number by the
	 * "31 -" correction.  NOTE(review): scan presumably counts from
	 * the most significant end — confirm against the FR-V ISA
	 * manual.
	 */
	asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x));
	return 31 - bit;
}
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
/*
 * Atomic ext2 bitmap helpers.  The "^ 0x18" remaps the little-endian
 * ext2 bit number onto this CPU's layout by flipping the byte index
 * within each 32-bit word — NOTE(review): presumed big-endian
 * adjustment; verify against the generic ext2 bitops.  The @lock
 * argument is unused because the underlying operations are themselves
 * atomic.
 */
#define ext2_set_bit_atomic(lock,nr,addr)	test_and_set_bit  ((nr) ^ 0x18, (addr))
#define ext2_clear_bit_atomic(lock,nr,addr)	test_and_clear_bit((nr) ^ 0x18, (addr))
#include <asm-generic/bitops/minix-le.h>

#endif /* __KERNEL__ */
#endif /* _ASM_BITOPS_H */