bitops_no.h

#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#if defined (__mcfisaaplus__) || defined (__mcfisac__)
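/*
 * The ISA_A+ and ISA_C ColdFire cores provide the bitrev and ff1
 * instructions: bitrev reverses the bit order of a data register, and
 * ff1 returns the offset, counted from the most significant bit, of
 * the first set bit (32 if none).  Reversing first therefore yields
 * the index of the least significant set bit, which is exactly what
 * __ffs() wants; ffs() adds one for the 1-based convention.
 */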
static inline int ffs(unsigned int val)
{
	if (!val)
		return 0;

	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	val++;
	return val;
}

static inline int __ffs(unsigned int val)
{
	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
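
/*
 * The m68k is big endian, so bit nr of a long lives in byte
 * ((nr ^ 31) >> 3): the xor flips only the low five bits of nr, which
 * maps bits 0-7 of each 32-bit long onto its last byte and bits 24-31
 * onto its first, and still indexes correctly into an array of longs.
 * The bset/bclr/bchg/btst instructions take the bit number modulo 8
 * when addressing a byte in memory, so the low three bits of nr pick
 * the bit within that byte.
 */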
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)
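
/*
 * In the test-and-modify helpers below, bset/bclr/bchg test the
 * addressed bit before changing it and set the Z condition code from
 * its old value; "sne %0" then materializes that as 0x00 (bit was
 * clear) or 0xff (bit was set) in retval.  The ColdFire variants need
 * the extra "lea" because ColdFire's bit instructions accept fewer
 * memory addressing modes than the full 68000 family.
 */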
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
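
/*
 * Both test_bit() paths are plain word loads in native bit order:
 * bit nr of the bitmap is tested against a (1 << (nr & 31)) mask in
 * word (nr >> 5), so no big endian byte-index fixup is needed here.
 */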

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
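
/*
 * The ext2 bitops below use the little endian bit order that ext2
 * stores on disk: bit nr lives in byte (nr >> 3), at position
 * (nr & 7) inside it.  Since the bytes are addressed directly, no
 * (nr ^ 31) fixup is needed and the layout matches the on-disk
 * format regardless of CPU endianness.
 */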
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
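
/*
 * The _atomic variants serialize on the caller-supplied spinlock, as
 * the generic ext2 bitop API expects; the lock makes the whole
 * test-and-set appear atomic to any other path holding the same lock.
 */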
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, where a plain
		 * shift mask would pick out the wrong bits. We could
		 * keep a big endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but that would decrease performance, so we swab the
		 * shifted mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
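
/*
 * Worked example of the mask trick above, for offset = 8: the little
 * endian mask for bitmap bits 0-7 is ~0UL >> 24 = 0x000000ff, and
 * __swab32() turns it into 0xff000000, which on this big endian CPU
 * covers exactly byte 0 of the loaded word - i.e. bitmap bits 0-7.
 */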
#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */