#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
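
/*
 * The m68k/ColdFire bit instructions (bset, bclr, bchg, btst) operate on
 * one byte of memory, using the bit number modulo 8.  These bitmaps are
 * arrays of 32-bit words with little-endian bit numbering stored on a
 * big-endian CPU, so bit nr lives in byte ((nr ^ 31) >> 3) of its word:
 * bits 0-7 are in the last byte of the word, bits 24-31 in the first.
 */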

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}
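
/*
 * On this uniprocessor target the bset/bclr/bchg memory operation is a
 * single read-modify-write instruction that cannot be interrupted
 * part-way, so the "non-atomic" __xxx_bit() variants can simply alias
 * the plain ones.
 */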

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)
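
/*
 * The test_and_xxx_bit() forms rely on bset/bclr/bchg setting the Z flag
 * from the *old* value of the bit: sne then writes 0xff to retval when Z
 * is clear, i.e. when the bit was previously set, and 0 otherwise.
 */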

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bchg %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}
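
/*
 * The constant/variable split below lets the compiler fold the word
 * index and bit mask when nr is a compile-time constant.
 */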

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
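
/*
 * ext2 bitmaps are little-endian in both bit and byte order, so the byte
 * holding bit nr is simply addr[nr >> 3] (no ^31 correction here) and
 * the bit within it is (nr & 7), which is what bset/bclr/btst use.
 */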

static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
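
/*
 * The ext2 code also expects locked versions of these operations; here
 * they just run the plain ones under the caller-supplied spinlock.
 */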

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
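
/*
 * Scan a little-endian ext2 bitmap on this big-endian CPU for the first
 * zero bit.  Full words can be tested against ~0UL without byte-swapping;
 * __swab32() is only needed on the masks for partial words and on tmp
 * before ffz() extracts the bit number.
 */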

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */