/*
 * bitops_mm.h — m68k (680x0 with MMU) bit-operation primitives.
 */
  1. #ifndef _M68K_BITOPS_H
  2. #define _M68K_BITOPS_H
  3. /*
  4. * Copyright 1992, Linus Torvalds.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file COPYING in the main directory of this archive
  8. * for more details.
  9. */
  10. #ifndef _LINUX_BITOPS_H
  11. #error only <linux/bitops.h> can be included directly
  12. #endif
  13. #include <linux/compiler.h>
  14. /*
  15. * Require 68020 or better.
  16. *
  17. * They use the standard big-endian m680x0 bit ordering.
  18. */
/*
 * test_and_set_bit(nr, vaddr): set bit `nr` in the bitmap at `vaddr` and
 * return nonzero iff it was already set.  A compile-time-constant `nr`
 * uses the byte-addressed bset instruction; otherwise the 68020+ bfset
 * bitfield instruction takes the bit number from a register.
 */
#define test_and_set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_set_bit(nr, vaddr) : \
	 __generic_test_and_set_bit(nr, vaddr))

/* The "non-atomic" variant shares the same implementation on m68k. */
#define __test_and_set_bit(nr,vaddr)	test_and_set_bit(nr,vaddr)
/*
 * Constant-nr form of test_and_set_bit().
 *
 * Longwords are big-endian, so little-endian bit `nr` (bit 0 = LSB of
 * the 32-bit word) is bit (nr & 7) of byte (nr ^ 31) / 8.  bset copies
 * the old bit into the Z flag; sne turns that into 0xff/0x00, so the
 * return value is nonzero iff the bit was already set.
 */
static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
			      : "=d" (retval), "+m" (*p)
			      : "di" (nr & 7));

	return retval;
}
/*
 * Variable-nr form: bfset on a one-bit-wide bitfield.  Bitfield offsets
 * count down from the MSB, hence the nr^31 conversion from the
 * little-endian bit number.  sne makes retval nonzero iff the bit was
 * already set.
 */
static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			      : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/*
 * set_bit(nr, vaddr): set bit `nr`, no return value.  Dispatches on
 * whether `nr` is a compile-time constant, like test_and_set_bit().
 */
#define set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_set_bit(nr, vaddr) : \
	 __generic_set_bit(nr, vaddr))

/* The "non-atomic" variant shares the same implementation on m68k. */
#define __set_bit(nr,vaddr)	set_bit(nr,vaddr)
/* Constant-nr set: bit (nr & 7) of byte (nr ^ 31) / 8 (big-endian word). */
static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
			      : "+m" (*p) : "di" (nr & 7));
}
/* Variable-nr set: one-bit bfset at MSB-relative offset nr^31. */
static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			      : : "d" (nr^31), "o" (*vaddr) : "memory");
}
/*
 * test_and_clear_bit(nr, vaddr): clear bit `nr` and return nonzero iff
 * it was set beforehand.  Same constant/variable dispatch as
 * test_and_set_bit().
 */
#define test_and_clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_clear_bit(nr, vaddr) : \
	 __generic_test_and_clear_bit(nr, vaddr))

/* The "non-atomic" variant shares the same implementation on m68k. */
#define __test_and_clear_bit(nr,vaddr)	test_and_clear_bit(nr,vaddr)
/*
 * Constant-nr form: big-endian word, so bit `nr` is bit (nr & 7) of
 * byte (nr ^ 31) / 8.  bclr latches the old bit in the Z flag; sne
 * makes retval nonzero iff the bit was set.
 */
static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			      : "=d" (retval), "+m" (*p)
			      : "di" (nr & 7));

	return retval;
}
/* Variable-nr form: one-bit bfclr at MSB-relative offset nr^31. */
static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			      : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * Callers that need ordering use these compiler barriers around it.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * clear_bit(nr, vaddr): clear bit `nr`, no return value.  Same
 * constant/variable dispatch as set_bit().
 */
#define clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_clear_bit(nr, vaddr) : \
	 __generic_clear_bit(nr, vaddr))

/* The "non-atomic" variant shares the same implementation on m68k. */
#define __clear_bit(nr,vaddr)	clear_bit(nr,vaddr)
/* Constant-nr clear: bit (nr & 7) of byte (nr ^ 31) / 8 (big-endian word). */
static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
			      : "+m" (*p) : "di" (nr & 7));
}
/* Variable-nr clear: one-bit bfclr at MSB-relative offset nr^31. */
static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			      : : "d" (nr^31), "o" (*vaddr) : "memory");
}
/*
 * test_and_change_bit(nr, vaddr): flip bit `nr` and return nonzero iff
 * it was set beforehand.  Same constant/variable dispatch as above.
 */
#define test_and_change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_change_bit(nr, vaddr) : \
	 __generic_test_and_change_bit(nr, vaddr))

/* "Non-atomic" variants share the same implementation on m68k.  Note
 * change_bit itself is #defined further down; macros expand at the
 * point of use, so the forward reference is harmless. */
#define __test_and_change_bit(nr,vaddr)	test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr)	change_bit(nr,vaddr)
/*
 * Constant-nr form: bchg flips the bit and latches its old value in
 * the Z flag; sne makes retval nonzero iff it was set beforehand.
 * Big-endian word: bit `nr` is bit (nr & 7) of byte (nr ^ 31) / 8.
 */
static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			      : "=d" (retval), "+m" (*p)
			      : "di" (nr & 7));

	return retval;
}
/* Variable-nr form: one-bit bfchg at MSB-relative offset nr^31. */
static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			      : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
/*
 * change_bit(nr, vaddr): flip bit `nr`, no return value.  Same
 * constant/variable dispatch as set_bit()/clear_bit().
 */
#define change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_change_bit(nr, vaddr) : \
	 __generic_change_bit(nr, vaddr))
/* Constant-nr flip: bit (nr & 7) of byte (nr ^ 31) / 8 (big-endian word). */
static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
			      : "+m" (*p) : "di" (nr & 7));
}
/* Variable-nr flip: one-bit bfchg at MSB-relative offset nr^31. */
static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			      : : "d" (nr^31), "o" (*vaddr) : "memory");
}
  135. static inline int test_bit(int nr, const unsigned long *vaddr)
  136. {
  137. return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
  138. }
/*
 * find_first_zero_bit() - find the first cleared bit in a bitmap.
 * @vaddr: start of the bitmap
 * @size: size of the bitmap in bits
 *
 * Returns the little-endian bit number of the first zero bit, or
 * @size if the bitmap is all ones (for size == 0 it returns 0, which
 * equals size).  Fully-set longwords are skipped; in the first word
 * with a zero, ~word & -~word isolates the lowest zero bit and bfffo
 * (offset #0, width #0 == 32 bits) reports its MSB-relative offset,
 * which res ^= 31 converts back to a little-endian index.  Assumes
 * 32-bit unsigned long (m68k).
 */
static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		/* word was all ones: keep scanning */
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the examined word, hence "- 4" */
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
/*
 * find_next_zero_bit() - find the next cleared bit at or after @offset.
 * Returns @size if there is none.  A partial first longword is handled
 * inline (bits below @offset are masked away before inverting); the
 * remainder is delegated to find_first_zero_bit().  bfffo returns 32
 * when its operand is zero, hence the res < 32 "found" test.
 */
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;	/* offset now points at the word base */

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
/*
 * find_first_bit() - find the first set bit in a bitmap.
 * @vaddr: start of the bitmap
 * @size: size of the bitmap in bits
 *
 * Returns the little-endian bit number of the first set bit, or @size
 * if the bitmap is all zeroes.  Mirror image of find_first_zero_bit():
 * all-zero longwords are skipped, then num & -num isolates the lowest
 * set bit and bfffo / res ^= 31 convert its MSB-relative offset to a
 * little-endian index.  Assumes 32-bit unsigned long (m68k).
 */
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		/* word was all zeroes: keep scanning */
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the examined word, hence "- 4" */
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
/*
 * find_next_bit() - find the next set bit at or after @offset.
 * Returns @size if there is none.  Handles a partial first longword
 * inline, then delegates to find_first_bit().
 */
static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* mask off bits below @offset in the first word */
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {		/* bfffo returns 32 when num == 0 */
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * ~word & -~word isolates the lowest zero bit; bfffo reports its
 * MSB-relative offset, and res ^ 31 converts that back to the
 * little-endian bit index.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));

	return res ^ 31;
}
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 *
 * x & -x isolates the lowest set bit; bfffo reports its offset from
 * the MSB (width #0 encodes a full 32-bit field), so 32 - cnt is the
 * 1-based little-endian index.  For x == 0 bfffo yields 32, giving
 * ffs(0) == 0 as required.
 *
 * NOTE(review): this bfffo spells the bitfield as {#0:#0} while the
 * rest of the file uses {#0,#0}; gas appears to accept both
 * separators — confirm against the assembler in use.
 */
static inline int ffs(int x)
{
	int cnt;

	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));

	return 32 - cnt;
}

/* __ffs: 0-based variant; meaningless for x == 0 (would yield -1). */
#define __ffs(x)	(ffs(x) - 1)
/*
 * fls: find last bit set.
 *
 * bfffo on the raw value gives the MSB-relative offset of the highest
 * set bit; 32 - cnt converts it to fls()'s 1-based index, and
 * fls(0) == 0 falls out of bfffo's not-found result of 32.
 */
static inline int fls(int x)
{
	int cnt;

	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));

	return 32 - cnt;
}
  261. static inline int __fls(int x)
  262. {
  263. return fls(x) - 1;
  264. }
  265. #include <asm-generic/bitops/fls64.h>
  266. #include <asm-generic/bitops/sched.h>
  267. #include <asm-generic/bitops/hweight.h>
  268. #include <asm-generic/bitops/lock.h>
/* Bitmap functions for the little endian bitmap.
 *
 * In a little-endian bitmap, bit `nr` lives in byte nr >> 3 counting
 * from the start of the buffer.  On big-endian m68k that byte sits at
 * the opposite end of its 32-bit longword, so XORing the bit number
 * with 24 (binary 11000 — it flips only the byte-within-word part)
 * maps an LE bit number onto the native big-endian helpers above.
 */

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ 24, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ 24, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
	return __test_and_set_bit(nr ^ 24, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
	return test_and_set_bit(nr ^ 24, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
	return __test_and_clear_bit(nr ^ 24, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
	return test_and_clear_bit(nr ^ 24, addr);
}
  294. static inline int test_bit_le(int nr, const void *vaddr)
  295. {
  296. const unsigned char *p = vaddr;
  297. return (p[nr >> 3] & (1U << (nr & 7))) != 0;
  298. }
/*
 * find_first_zero_bit_le() - first zero bit of a little-endian bitmap.
 * Returns @size if every bit of the @size-bit map is set.
 *
 * Whole longwords are compared against ~0UL, which is byte-order
 * independent; only the final per-bit scan (via test_bit_le()) needs
 * LE numbering.  After the scan loop `p` points one past the word
 * containing a zero, so it is backed up first.  Assumes 32-bit
 * unsigned long (m68k).
 */
static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	words = (size >> 5) + ((size & 31) > 0);	/* longwords, rounded up */
	while (*p++ == ~0UL) {
		if (--words == 0)
			goto out;
	}

	--p;	/* back up to the word that contains a zero */
	for (res = 0; res < 32; res++)
		if (!test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
/*
 * find_next_zero_bit_le() - next zero bit at or after @offset in a
 * little-endian bitmap; returns @size if there is none.  A partial
 * first longword is scanned bit-by-bit with test_bit_le(); the rest
 * is delegated to find_first_zero_bit_le().
 */
static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;	/* offset now points at the word base */
		/* Look for zero in first longword */
		for (res = bit; res < 32; res++)
			if (!test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit_le(p, size - offset);
}
/*
 * find_first_bit_le() - first set bit of a little-endian bitmap.
 * Returns @size if the @size-bit map is all zeroes.  All-zero
 * longwords are skipped byte-order-independently; the per-bit scan
 * uses test_bit_le().  Assumes 32-bit unsigned long (m68k).
 */
static inline int find_first_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	words = (size >> 5) + ((size & 31) > 0);	/* longwords, rounded up */
	while (*p++ == 0UL) {
		if (--words == 0)
			goto out;
	}

	--p;	/* back up to the word that contains a set bit */
	for (res = 0; res < 32; res++)
		if (test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
/*
 * find_next_bit_le() - next set bit at or after @offset in a
 * little-endian bitmap; returns @size if there is none.  A partial
 * first longword is scanned bit-by-bit; the remainder is delegated
 * to find_first_bit_le().
 */
static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;	/* offset now points at the word base */
		/* Look for one in first longword */
		for (res = bit; res < 32; res++)
			if (test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No set bit yet, search remaining full bytes for a set bit */
	return offset + find_first_bit_le(p, size - offset);
}
/* Bitmap functions for the ext2 filesystem. */

/*
 * The @lock argument exists for architectures that need an external
 * spinlock to make these operations atomic; it is unused here, and
 * the little-endian test-and-set/clear helpers are used directly.
 */
#define ext2_set_bit_atomic(lock, nr, addr)	\
	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr)	\
	test_and_clear_bit_le(nr, addr)

#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */