bitops.h

#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

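/*
 * Added note: bset/bclr/bchg/btst with a memory operand address a single
 * byte and use the bit number modulo 8.  The (nr ^ 31) >> 3 index below
 * picks the byte of the big-endian 32-bit word that holds logical bit nr,
 * e.g. nr = 0 ends up as bit 0 of byte 3 and nr = 31 as bit 7 of byte 0.
 * The CONFIG_COLDFIRE variants load the address into %a0 first, presumably
 * because ColdFire's bit instructions accept a narrower set of
 * effective-address modes than the full 68000 family.
 */
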
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

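/*
 * Illustrative usage (added commentary; IRQ_PENDING and irq_flags are
 * hypothetical names):
 *
 *	static unsigned long irq_flags[2];
 *	set_bit(IRQ_PENDING, irq_flags);
 *
 * Bit numbers may run past 31; bits 32-63 land in irq_flags[1].
 */
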
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

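/*
 * Illustrative ordering pattern (added commentary; FLAG_BUSY and the
 * helper are hypothetical):
 *
 *	update_shared_state();
 *	smp_mb__before_clear_bit();
 *	clear_bit(FLAG_BUSY, &flags);
 *
 * Without the explicit barrier the compiler may move the stores made by
 * update_shared_state() past the bclr, since the asm below only claims
 * the one byte it touches.
 */
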
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

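/*
 * Added note: in the test_and_*() helpers below, bset/bclr/bchg set the
 * condition-code Z flag from the OLD value of the addressed bit, and
 * "sne %0" then writes 0xff into the result byte when Z is clear, so
 * retval is non-zero exactly when the bit was already set beforehand.
 */
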
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

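/*
 * Added note: when nr is a compile-time constant the compiler can fold
 * __constant_test_bit() down to a single mask test, e.g. test_bit(5, p)
 * becomes (p[0] & (1UL << 5)) != 0; __test_bit() handles a run-time nr.
 */
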
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

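/*
 * Added note: the ext2_*() helpers below use the ext2/minix bitmap
 * convention, which is little-endian: bit nr lives in byte nr >> 3,
 * bit nr & 7 of that byte.  That matches the byte-wise m68k bit
 * instructions directly, so no (nr ^ 31) adjustment is needed here.
 */
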
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
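		/*
		 * Added worked example: for offset = 4, ~0UL >> 28 is
		 * 0x0000000f (ext2 bits 0-3); __swab32() moves it to
		 * 0x0f000000, which is where those bits sit in the word
		 * as loaded by this big-endian CPU, so they read as 1
		 * and are skipped when ffz(__swab32(tmp)) runs below.
		 */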
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));

found_middle:
	return result + ffz(__swab32(tmp));
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */