/*
 * m68k <asm/bitops.h> — atomic bit operations and bit-search primitives.
 */
  1. #ifndef _M68K_BITOPS_H
  2. #define _M68K_BITOPS_H
  3. /*
  4. * Copyright 1992, Linus Torvalds.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file COPYING in the main directory of this archive
  8. * for more details.
  9. */
  10. #include <linux/compiler.h>
  11. /*
  12. * Require 68020 or better.
  13. *
  14. * They use the standard big-endian m680x0 bit ordering.
  15. */
/*
 * test_and_set_bit() selects an implementation at compile time: a
 * constant bit number can be encoded directly into a one-byte bset
 * instruction, a variable one uses the 68020+ bfset bitfield insn.
 */
#define test_and_set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_set_bit(nr, vaddr) : \
	 __generic_test_and_set_bit(nr, vaddr))

/* bset/bfset are single instructions and hence already atomic, so the
 * "non-atomic" double-underscore variant aliases the atomic one. */
#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)

/*
 * Atomically set bit nr in *vaddr, returning nonzero iff it was
 * already set.  Bit numbering is little-endian within each 32-bit
 * word while the CPU addresses bytes big-endian, hence the byte
 * offset (nr ^ 31) / 8 (bit 0 lives in byte 3, bit 31 in byte 0).
 */
static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bset latches the previous bit value in the Z flag;
	 * sne turns that into 0x00 (was clear) / 0xff (was set). */
	__asm__ __volatile__ ("bset %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/*
 * Variable-nr form of test_and_set_bit().  bfset addresses the bit as
 * bitfield offset (nr ^ 31) of width 1, because bitfield offsets count
 * from the most-significant bit of the longword.
 */
static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	/* sne converts the Z flag (old bit value) into 0x00 / 0xff. */
	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/*
 * set_bit(): same compile-time dispatch as test_and_set_bit(), but no
 * return value is produced (cheaper when the old value is not needed).
 */
#define set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_set_bit(nr, vaddr) : \
	 __generic_set_bit(nr, vaddr))

/* Single-insn set is already atomic; the __ variant aliases it. */
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)

/* Set bit nr (constant form); see __constant_test_and_set_bit for the
 * (nr ^ 31) / 8 byte-offset explanation. */
static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Set bit nr (variable form) via a one-bit bfset at MSB-first
 * bitfield offset nr ^ 31. */
static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
/*
 * test_and_clear_bit(): compile-time dispatch between the bclr and
 * bfclr forms, mirroring test_and_set_bit() above.
 */
#define test_and_clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_clear_bit(nr, vaddr) : \
	 __generic_test_and_clear_bit(nr, vaddr))

/* Atomic already; the __ variant aliases the atomic one. */
#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)

/*
 * Atomically clear bit nr, returning nonzero iff it was set.
 * Byte offset (nr ^ 31) / 8 maps the little-endian bit number onto
 * the big-endian byte layout (see __constant_test_and_set_bit).
 */
static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bclr records the old bit in Z; sne yields 0x00 / 0xff. */
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/* Variable-nr test-and-clear via a one-bit bfclr; sne materializes
 * the old bit value (0x00 / 0xff) from the Z flag. */
static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * Callers needing ordering use these explicit compiler barriers.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/* clear_bit(): compile-time dispatch between bclr and bfclr forms. */
#define clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_clear_bit(nr, vaddr) : \
	 __generic_clear_bit(nr, vaddr))

/* Atomic already; the __ variant aliases the atomic one. */
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)

/* Clear bit nr (constant form); (nr ^ 31) / 8 maps the little-endian
 * bit number onto the big-endian byte layout. */
static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Clear bit nr (variable form) via a one-bit bfclr at MSB-first
 * bitfield offset nr ^ 31. */
static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
/*
 * test_and_change_bit(): compile-time dispatch between the bchg and
 * bfchg (toggle) forms.
 */
#define test_and_change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_change_bit(nr, vaddr) : \
	 __generic_test_and_change_bit(nr, vaddr))

/* Atomic already; the __ variants alias the atomic ones.  Note that
 * __change_bit expands to change_bit, which is #defined further down —
 * legal, since macro expansion happens at the use site. */
#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)

/*
 * Atomically toggle bit nr, returning nonzero iff it was set before.
 * Byte offset (nr ^ 31) / 8 maps the little-endian bit number onto
 * the big-endian byte layout.
 */
static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bchg latches the old bit in Z; sne yields 0x00 / 0xff. */
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}
/* Variable-nr test-and-toggle via a one-bit bfchg; sne materializes
 * the previous bit value (0x00 / 0xff) from the Z flag. */
static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/* change_bit(): toggle bit nr without returning the old value. */
#define change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_change_bit(nr, vaddr) : \
	 __generic_change_bit(nr, vaddr))

/* Toggle bit nr (constant form); (nr ^ 31) / 8 maps the little-endian
 * bit number onto the big-endian byte layout. */
static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}
/* Toggle bit nr (variable form) via a one-bit bfchg at MSB-first
 * bitfield offset nr ^ 31. */
static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
  132. static inline int test_bit(int nr, const unsigned long *vaddr)
  133. {
  134. return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
  135. }
/*
 * Find the number of the first zero bit in a bitmap of 'size' bits.
 * Assumes 32-bit unsigned long (true on m68k).
 * NOTE(review): returns 0 when size == 0, and when no zero bit exists
 * the result is size rounded up to a multiple of 32 — callers must
 * bound-check the result against size.
 */
static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;		/* 32 == "no zero found in a word" */
	unsigned long num;

	if (!size)
		return 0;

	size = (size + 31) >> 5;	/* longwords, rounded up */
	/* Skip longwords that are all ones (~word == 0). */
	while (!(num = ~*p++)) {
		if (!--size)
			goto out;
	}

	/* num & -num isolates the lowest set bit of the inverted word;
	 * bfffo {#0,#0} (offset 0, width 32) yields its MSB-first
	 * position, and res ^ 31 converts that to the bit number. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the longword just examined. */
	return ((long)p - (long)vaddr - 4) * 8 + res;
}
/*
 * Find the first zero bit at or after 'offset' in a bitmap of 'size'
 * bits.  Returns size if offset is already past the end.
 */
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Invert the word and mask off bits below 'offset', so
		 * zero bits of interest appear as set bits in num. */
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				: "=d" (res) : "d" (num & -num));
		/* bfffo returns 32 when num is zero, so res < 32
		 * means a zero bit was found in this longword. */
		if (res < 32)
			return offset + (res ^ 31);
		offset += 32;
	}
	/* No zero yet, search remaining full bytes for a zero */
	res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8);
	return offset + res;
}
/*
 * Find the number of the first set bit in a bitmap of 'size' bits.
 * Mirror image of find_first_zero_bit(): scans for a nonzero word
 * instead of a non-all-ones word.
 * NOTE(review): same boundary behavior — returns 0 for size == 0,
 * and size rounded up to a multiple of 32 when no bit is set.
 */
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;		/* 32 == "no set bit in a word" */
	unsigned long num;

	if (!size)
		return 0;

	size = (size + 31) >> 5;	/* longwords, rounded up */
	/* Skip all-zero longwords. */
	while (!(num = *p++)) {
		if (!--size)
			goto out;
	}

	/* Isolate the lowest set bit, locate it MSB-first with bfffo,
	 * then convert to a little-endian bit number with ^ 31. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the longword just examined. */
	return ((long)p - (long)vaddr - 4) * 8 + res;
}
/*
 * Find the first set bit at or after 'offset' in a bitmap of 'size'
 * bits.  Returns size if offset is already past the end.
 */
static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Mask off bits below 'offset' in the first longword. */
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				: "=d" (res) : "d" (num & -num));
		/* bfffo returns 32 for a zero field, so res < 32 means
		 * a set bit was found in this longword. */
		if (res < 32)
			return offset + (res ^ 31);
		offset += 32;
	}
	/* No one yet, search remaining full bytes for a one */
	res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8);
	return offset + res;
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	/* ~word & -~word isolates the lowest zero bit of word as a
	 * single set bit; bfffo gives its MSB-first offset and the
	 * final ^ 31 converts back to a bit number. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (~word & -~word));

	return res ^ 31;
}
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 * Returns 1..32 for the lowest set bit, 0 when x == 0.
 */
static inline int ffs(int x)
{
	int cnt;

	/* x & -x isolates the lowest set bit.  bfffo returns the
	 * MSB-first offset, or 32 for a zero field — so 32 - cnt is
	 * exactly 0 for x == 0 and 1-based otherwise.
	 * NOTE(review): the bitfield here is spelled {#0:#0} while the
	 * rest of this file uses {#0,#0}; presumably both assemble
	 * (this code shipped), but the inconsistency is worth unifying
	 * after checking the assembler's accepted syntax. */
	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));

	return 32 - cnt;
}

/* 0-based variant of ffs(); result is undefined for x == 0
 * (this expansion would yield -1). */
#define __ffs(x) (ffs(x) - 1)
/*
 * fls: find last bit set.
 * Returns 1..32 for the highest set bit, 0 when x == 0.
 */
static inline int fls(int x)
{
	int cnt;

	/* bfffo scans MSB-first, so cnt is the number of leading zero
	 * bits; it returns 32 for x == 0, making fls(0) == 0. */
	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));

	return 32 - cnt;
}

/* 64-bit fls is built generically on the 32-bit one. */
#define fls64(x) generic_fls64(x)
  249. /*
  250. * Every architecture must define this function. It's the fastest
  251. * way of searching a 140-bit bitmap where the first 100 bits are
  252. * unlikely to be set. It's guaranteed that at least one of the 140
  253. * bits is cleared.
  254. */
  255. static inline int sched_find_first_bit(const unsigned long *b)
  256. {
  257. if (unlikely(b[0]))
  258. return __ffs(b[0]);
  259. if (unlikely(b[1]))
  260. return __ffs(b[1]) + 32;
  261. if (unlikely(b[2]))
  262. return __ffs(b[2]) + 64;
  263. if (b[3])
  264. return __ffs(b[3]) + 96;
  265. return __ffs(b[4]) + 128;
  266. }
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 * (delegated to the generic C implementations).
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
/* Bitmap functions for the minix filesystem */

/*
 * Find the first zero bit in a minix bitmap, treated as an array of
 * 16-bit words.  Returns size rounded up to a multiple of 16 when
 * every bit is set.
 */
static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
{
	const unsigned short *p = vaddr, *addr = vaddr;
	int res;
	unsigned short num;

	if (!size)
		return 0;

	size = (size >> 4) + ((size & 15) > 0);	/* shorts, rounded up */
	/* Skip shorts that are all ones. */
	while (*p++ == 0xffff)
	{
		if (--size == 0)
			return (p - addr) << 4;
	}

	num = ~*--p;	/* back up to the short containing a zero */
	/* {#16,#16}: scan only the low-order 16 bits of the data
	 * register holding num; res lands in 16..31, and res ^ 31
	 * converts it to a 0..15 bit index within the short. */
	__asm__ __volatile__ ("bfffo %1{#16,#16},%0"
			: "=d" (res) : "d" (num & -num));

	return ((p - addr) << 4) + (res ^ 31);
}
/* The minix bit ordering differs from the native one by a 16-bit
 * halfword swap within each longword, hence nr ^ 16 when delegating
 * to the native bit operations. */
#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr))
#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
  296. static inline int minix_test_bit(int nr, const void *vaddr)
  297. {
  298. const unsigned short *p = vaddr;
  299. return (p[nr >> 4] & (1U << (nr & 15))) != 0;
  300. }
/* Bitmap functions for the ext2 filesystem. */

/* ext2 bitmaps use little-endian byte order; nr ^ 24 swaps the byte
 * position within the longword (bits 0-7 <-> 24-31, 8-15 <-> 16-23)
 * so the native big-endian operations touch the correct bit.  The
 * _atomic variants ignore 'lock' since the ops are already atomic. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
  306. static inline int ext2_test_bit(int nr, const void *vaddr)
  307. {
  308. const unsigned char *p = vaddr;
  309. return (p[nr >> 3] & (1U << (nr & 7))) != 0;
  310. }
  311. static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
  312. {
  313. const unsigned long *p = vaddr, *addr = vaddr;
  314. int res;
  315. if (!size)
  316. return 0;
  317. size = (size >> 5) + ((size & 31) > 0);
  318. while (*p++ == ~0UL)
  319. {
  320. if (--size == 0)
  321. return (p - addr) << 5;
  322. }
  323. --p;
  324. for (res = 0; res < 32; res++)
  325. if (!ext2_test_bit (res, p))
  326. break;
  327. return (p - addr) * 32 + res;
  328. }
  329. static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
  330. unsigned offset)
  331. {
  332. const unsigned long *addr = vaddr;
  333. const unsigned long *p = addr + (offset >> 5);
  334. int bit = offset & 31UL, res;
  335. if (offset >= size)
  336. return size;
  337. if (bit) {
  338. /* Look for zero in first longword */
  339. for (res = bit; res < 32; res++)
  340. if (!ext2_test_bit (res, p))
  341. return (p - addr) * 32 + res;
  342. p++;
  343. }
  344. /* No zero yet, search remaining full bytes for a zero */
  345. res = ext2_find_first_zero_bit (p, size - 32 * (p - addr));
  346. return (p - addr) * 32 + res;
  347. }
  348. #endif /* __KERNEL__ */
  349. #endif /* _M68K_BITOPS_H */