#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * Bit access functions vary across the ColdFire and 68k families.
 * So we will break them out here, and then macro in the ones we want.
 *
 * ColdFire  - supports standard bset/bclr/bchg with register operand only
 * 68000     - supports standard bset/bclr/bchg with memory operand
 * >= 68020  - also supports the bfset/bfclr/bfchg instructions
 *
 * Although it is possible to use only the bset/bclr/bchg with register
 * operands on all platforms you end up with larger generated code.
 * So we use the best form possible on a given platform.
 */
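/*
 * A note on the helpers below: with a memory operand, bset/bclr/bchg
 * address a single byte, so each helper converts the long-word bit
 * number 'nr' into a byte address plus a bit-within-byte number.
 * "(nr ^ 31) / 8" selects the byte of the big-endian long word that
 * holds bit 'nr', and "nr & 7" is the bit inside that byte; for
 * example, nr = 0 lands in byte 3, bit 0, and nr = 31 in byte 0, bit 7.
 */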
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bset %1,(%0)"
                :
                : "a" (p), "di" (nr & 7)
                : "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bset %1,%0"
                : "+m" (*p)
                : "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
        __asm__ __volatile__ ("bfset %1{%0:#1}"
                :
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
}

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)      bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)      bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)      (__builtin_constant_p(nr) ? \
                                 bset_mem_set_bit(nr, vaddr) : \
                                 bfset_mem_set_bit(nr, vaddr))
#endif

#define __set_bit(nr, vaddr)    set_bit(nr, vaddr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
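/*
 * A typical (illustrative) sequence, with made-up names, where clear_bit()
 * releases a flag that another CPU polls and the preceding stores must be
 * ordered explicitly:
 *
 *      update_shared_state(s);
 *      smp_mb__before_clear_bit();
 *      clear_bit(IN_PROGRESS, &s->flags);
 */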
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bclr %1,(%0)"
                :
                : "a" (p), "di" (nr & 7)
                : "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bclr %1,%0"
                : "+m" (*p)
                : "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
        __asm__ __volatile__ ("bfclr %1{%0:#1}"
                :
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
}

#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr)    bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr)    bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)    (__builtin_constant_p(nr) ? \
                                 bclr_mem_clear_bit(nr, vaddr) : \
                                 bfclr_mem_clear_bit(nr, vaddr))
#endif

#define __clear_bit(nr, vaddr)  clear_bit(nr, vaddr)

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bchg %1,(%0)"
                :
                : "a" (p), "di" (nr & 7)
                : "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;

        __asm__ __volatile__ ("bchg %1,%0"
                : "+m" (*p)
                : "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
        __asm__ __volatile__ ("bfchg %1{%0:#1}"
                :
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
}

#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr)   bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr)   bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)   (__builtin_constant_p(nr) ? \
                                 bchg_mem_change_bit(nr, vaddr) : \
                                 bfchg_mem_change_bit(nr, vaddr))
#endif

#define __change_bit(nr, vaddr) change_bit(nr, vaddr)

static inline int test_bit(int nr, const unsigned long *vaddr)
{
        return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
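/*
 * test_bit() needs no special instructions; for example, test_bit(35, map)
 * simply reads bit 3 of map[1].
 */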
static inline int bset_reg_test_and_set_bit(int nr,
                                            volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bset %2,(%1); sne %0"
                : "=d" (retval)
                : "a" (p), "di" (nr & 7)
                : "memory");
        return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
                                            volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bset %2,%1; sne %0"
                : "=d" (retval), "+m" (*p)
                : "di" (nr & 7));
        return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
                                             volatile unsigned long *vaddr)
{
        char retval;

        __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
                : "=d" (retval)
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
        return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr)     bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr)     bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)     (__builtin_constant_p(nr) ? \
                                        bset_mem_test_and_set_bit(nr, vaddr) : \
                                        bfset_mem_test_and_set_bit(nr, vaddr))
#endif

#define __test_and_set_bit(nr, vaddr)   test_and_set_bit(nr, vaddr)

static inline int bclr_reg_test_and_clear_bit(int nr,
                                              volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bclr %2,(%1); sne %0"
                : "=d" (retval)
                : "a" (p), "di" (nr & 7)
                : "memory");
        return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
                                              volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bclr %2,%1; sne %0"
                : "=d" (retval), "+m" (*p)
                : "di" (nr & 7));
        return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
                                               volatile unsigned long *vaddr)
{
        char retval;

        __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
                : "=d" (retval)
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
        return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr)   bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr)   bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)   (__builtin_constant_p(nr) ? \
                                        bclr_mem_test_and_clear_bit(nr, vaddr) : \
                                        bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)

static inline int bchg_reg_test_and_change_bit(int nr,
                                               volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bchg %2,(%1); sne %0"
                : "=d" (retval)
                : "a" (p), "di" (nr & 7)
                : "memory");
        return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
                                               volatile unsigned long *vaddr)
{
        char *p = (char *)vaddr + (nr ^ 31) / 8;
        char retval;

        __asm__ __volatile__ ("bchg %2,%1; sne %0"
                : "=d" (retval), "+m" (*p)
                : "di" (nr & 7));
        return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
                                                volatile unsigned long *vaddr)
{
        char retval;

        __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
                : "=d" (retval)
                : "d" (nr ^ 31), "o" (*vaddr)
                : "memory");
        return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr)  bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr)  bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)  (__builtin_constant_p(nr) ? \
                                        bchg_mem_test_and_change_bit(nr, vaddr) : \
                                        bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)

/*
 * The true 68020 and more advanced processors support the "bfffo"
 * instruction for finding bits. ColdFire and simple 68000 parts
 * (including CPU32) do not support this. They simply use the generic
 * functions.
 */
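/*
 * In the functions below, "bfffo" scans a bit field from its most
 * significant end and returns the offset of the first set bit (or 32
 * for an all-zero 32-bit field). "num & -num" isolates the lowest set
 * bit of num, so bfffo reports 31 - k for bit number k, and the
 * "res ^ 31" / "res ^= 31" steps convert that back to the usual
 * LSB-relative bit number; e.g. num = 0x10 gives offset 27, and
 * 27 ^ 31 == 4.
 */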
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
                                      unsigned size)
{
        const unsigned long *p = vaddr;
        int res = 32;
        unsigned int words;
        unsigned long num;

        if (!size)
                return 0;

        words = (size + 31) >> 5;
        while (!(num = ~*p++)) {
                if (!--words)
                        goto out;
        }

        __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
                              : "=d" (res) : "d" (num & -num));
        res ^= 31;
out:
        res += ((long)p - (long)vaddr - 4) * 8;
        return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
                                     int offset)
{
        const unsigned long *p = vaddr + (offset >> 5);
        int bit = offset & 31UL, res;

        if (offset >= size)
                return size;

        if (bit) {
                unsigned long num = ~*p++ & (~0UL << bit);
                offset -= bit;

                /* Look for zero in first longword */
                __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
                                      : "=d" (res) : "d" (num & -num));
                if (res < 32) {
                        offset += res ^ 31;
                        return offset < size ? offset : size;
                }
                offset += 32;

                if (offset >= size)
                        return size;
        }
        /* No zero yet, search remaining full bytes for a zero */
        return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
        const unsigned long *p = vaddr;
        int res = 32;
        unsigned int words;
        unsigned long num;

        if (!size)
                return 0;

        words = (size + 31) >> 5;
        while (!(num = *p++)) {
                if (!--words)
                        goto out;
        }

        __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
                              : "=d" (res) : "d" (num & -num));
        res ^= 31;
out:
        res += ((long)p - (long)vaddr - 4) * 8;
        return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
                                int offset)
{
        const unsigned long *p = vaddr + (offset >> 5);
        int bit = offset & 31UL, res;

        if (offset >= size)
                return size;

        if (bit) {
                unsigned long num = *p++ & (~0UL << bit);
                offset -= bit;

                /* Look for one in first longword */
                __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
                                      : "=d" (res) : "d" (num & -num));
                if (res < 32) {
                        offset += res ^ 31;
                        return offset < size ? offset : size;
                }
                offset += 32;

                if (offset >= size)
                        return size;
        }
        /* No one yet, search remaining full bytes for a one */
        return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
        int res;

        __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
                              : "=d" (res) : "d" (~word & -~word));
        return res ^ 31;
}
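/*
 * Example: for word = 0xffff00ff the lowest zero bit is bit 8, so
 * "~word & -~word" isolates bit 8, bfffo reports offset 23, and
 * 23 ^ 31 == 8.
 */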
#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 * The newer ColdFire family members support a "bitrev" instruction
 * and we can use that to implement a fast ffs. Older Coldfire parts,
 * and normal 68000 parts don't have anything special, so we use the
 * generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
        !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
static inline int __ffs(int x)
{
        __asm__ __volatile__ ("bitrev %0; ff1 %0"
                : "=d" (x)
                : "0" (x));
        return x;
}

static inline int ffs(int x)
{
        if (!x)
                return 0;
        return __ffs(x) + 1;
}
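/*
 * Illustrative example: for x = 0x00010000, "bitrev" produces
 * 0x00008000 and "ff1" then reports 16 leading zero bits, so
 * __ffs(0x00010000) == 16 and ffs(0x00010000) == 17, while ffs(0)
 * stays 0 thanks to the explicit test above.
 */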
#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
        int cnt;

        __asm__ ("bfffo %1{#0:#0},%0"
                : "=d" (cnt)
                : "dm" (x & -x));
        return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
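/*
 * For example, ffs(0) == 0, ffs(1) == 1 and ffs(0x00010000) == 17,
 * while __ffs(0x00010000) == 16.
 */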
/*
 * fls: find last bit set.
 */
static inline int fls(int x)
{
        int cnt;

        __asm__ ("bfffo %1{#0,#0},%0"
                : "=d" (cnt)
                : "dm" (x));
        return 32 - cnt;
}

static inline int __fls(int x)
{
        return fls(x) - 1;
}
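/*
 * Likewise, fls(0) == 0, fls(1) == 1 and fls(0x00010000) == 17, while
 * __fls(0x00010000) == 16, the bit number of the highest set bit.
 */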
#endif

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */