#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

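/*
 * Note on addressing: the bit instructions used above and below
 * operate on a single memory byte, so each routine indexes the
 * bitmap as bytes.  Bit numbers follow the native big-endian long
 * layout, hence byte (nr^31)>>3: bit 0 of a 32-bit word lives in
 * byte 3 and bit 31 in byte 0, and bset/bclr/bchg then act on bit
 * nr%8 within that byte.  On ColdFire the address is first loaded
 * into %a0, presumably because ColdFire's bit instructions accept a
 * narrower set of addressing modes than full 68k (likewise only the
 * "d" constraint is allowed for the bit number, not "di").  The
 * non-atomic __* variants below simply alias the atomic ones on
 * this uniprocessor port.
 */
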
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

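/*
 * Callers that need ordering around a flag clear are expected to
 * bracket it themselves; a sketch (the flag word and bit name are
 * hypothetical):
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_PENDING_BIT, &my_flags);
 *	smp_mb__after_clear_bit();
 *
 * On this port both macros reduce to a plain compiler barrier().
 */
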
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

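/*
 * The test_and_* routines return the previous state of the bit:
 * bset/bclr/bchg set the Z condition code from the bit's old value,
 * and "sne %0" writes 0xff into retval if the bit was set, 0x00 if
 * it was clear, so the result is nonzero iff the bit was already
 * set.  A typical claim-a-resource caller (sketch, hypothetical
 * names):
 *
 *	if (!test_and_set_bit(MY_BUSY_BIT, &my_state))
 *		... we own the resource ...
 */
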
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

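/*
 * test_bit() dispatches on __builtin_constant_p so the constant case
 * can be folded at compile time.  Both paths read the bitmap as
 * 32-bit words (index nr >> 5, mask 1 << (nr & 31)), which on this
 * big-endian machine selects the same bit as the byte addressing
 * used by the routines above.
 */
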
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

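/*
 * The ext2_* routines below operate on little-endian bitmaps as laid
 * out on disk by ext2/ext3: bit nr lives in byte nr >> 3 from the
 * start of the bitmap, at position nr & 7 within that byte, so no
 * ^31 adjustment is needed here.
 */
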
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

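/*
 * The *_atomic variants serialize with the caller-supplied spinlock
 * rather than with an atomic instruction, so all updaters of a given
 * bitmap must pass the same lock (typically a per-superblock lock in
 * the filesystem code).
 */
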
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

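/*
 * ext2_find_next_zero_bit(): scan a little-endian bitmap of 'size'
 * bits, starting at bit 'offset', and return the index of the first
 * zero bit, or a value no smaller than 'size' if every bit is set.
 * It handles a partial first word, then whole 32-bit words, then a
 * partial last word; words are byte-swapped with __swab32() before
 * ffz() so little-endian bit order maps onto the native one.
 */
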
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we apply the
		 * swab to the shifted mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */