bitops_no.h

#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#if defined(__mcfisaaplus__) || defined(__mcfisac__)
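/*
 * ColdFire ISA_A+ and ISA_C provide the bitrev (reverse all 32 bits)
 * and ff1 (offset of the first set bit, counting from the MSB)
 * instructions, so ffs() takes two instructions instead of the generic
 * software fallback.  Reversing the word turns "lowest set bit" into
 * "highest set bit": for ffs(0x08), bitrev moves bit 3 to bit 28, ff1
 * returns 3, and the increment below gives the 1-based result 4.
 */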
static inline int ffs(unsigned int val)
{
	if (!val)
		return 0;

	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	val++;
	return val;
}

static inline int __ffs(unsigned int val)
{
	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
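/*
 * The 68k/ColdFire bit instructions (bset, bclr, bchg, btst) operate on
 * a single byte when given a memory operand, taking the bit number
 * modulo 8.  Bitmaps here are arrays of big-endian 32-bit longs, so bit
 * nr of the bitmap lives in byte ((nr ^ 31) >> 3): bit 0 (the LSB of
 * the first long) is in byte 3, bit 31 in byte 0, bit 32 in byte 7, and
 * so on.  The ColdFire variants go through %a0 because the ColdFire bit
 * instructions support fewer addressing modes than the full 68k ones.
 */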
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
/*
 * clear_bit() doesn't provide any barrier for the compiler; callers
 * that rely on ordering must use the barrier macros below explicitly.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)
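/*
 * The test_and_*() operations below rely on the condition codes left
 * behind by the bit instruction: bset/bclr/bchg set the Z flag from the
 * *previous* state of the tested bit, and "sne %0" then fills retval
 * with 0xff if that bit was set (Z clear) and 0x00 otherwise, so each
 * function returns the old value of the bit.
 */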
static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif
	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif
	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif
	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
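/*
 * A sketch of typical use (the bitmap and bit number are illustrative):
 *
 *	static DECLARE_BITMAP(inuse_map, 64);
 *
 *	if (!test_and_set_bit(5, inuse_map)) {
 *		... bit 5 was clear, the slot is now ours ...
 *		clear_bit(5, inuse_map);
 *	}
 */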
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long *addr)
{
	const volatile unsigned int *a = (const volatile unsigned int *)addr;
	unsigned int mask;

	a += nr >> 5;
	mask = 1U << (nr & 0x1f);
	return (mask & *a) != 0;
}
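/*
 * test_bit() dispatches on __builtin_constant_p(): when nr is a
 * compile-time constant the test can fold down to a single mask against
 * a known word, while __test_bit() handles the general case.
 */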
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
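/*
 * BITS_PER_LONG is 32 here, so BITOP_LE_SWIZZLE is 24.  XOR-ing a
 * little-endian bit number with 24 and then applying the big-endian
 * (nr ^ 31) >> 3 byte addressing above reduces to plain byte nr >> 3,
 * bit nr & 7, i.e. the natural little-endian layout.  That is also why
 * the *_le() helpers below index the bitmap with nr >> 3 directly.
 */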
static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif
	return retval;
}

static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif
	return retval;
}
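/*
 * The ext2 atomic helpers wrap the *_le() test-and-modify operations in
 * a caller-supplied spinlock; the lock is how atomicity is provided to
 * the filesystem bitmap code on this architecture.
 */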
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
	int ret; \
	spin_lock(lock); \
	ret = __test_and_set_bit_le((nr), (addr)); \
	spin_unlock(lock); \
	ret; \
})

#define ext2_clear_bit_atomic(lock, nr, addr) \
({ \
	int ret; \
	spin_lock(lock); \
	ret = __test_and_clear_bit_le((nr), (addr)); \
	spin_unlock(lock); \
	ret; \
})

static inline int test_bit_le(int nr, const volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif
	return retval;
}
#define find_first_zero_bit_le(addr, size) \
	find_next_zero_bit_le((addr), (size), 0)

static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *)addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
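/*
 * A sketch of use (names illustrative): with a 64-bit little-endian
 * bitmap "unsigned long map[2]" cleared to zero,
 * find_first_zero_bit_le(map, 64) returns 0; after __set_bit_le(0, map),
 * find_next_zero_bit_le(map, 64, 1) returns 1.
 */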
#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */