#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
/*
 * Generic ffs().
 */
static inline int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
/*
 * Generic __ffs().
 */
static inline int __ffs(int x)
{
	int r = 0;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
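
/*
 * Note that ffs() numbers bits from 1 while __ffs() numbers them from 0:
 * for example ffs(0x4000) == 15 but __ffs(0x4000) == 14.  Both return 0
 * for a zero argument, which for __ffs() cannot be told apart from
 * __ffs(1), so callers must check for a non-zero word first.
 */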
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
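
/*
 * For example, if b[0] == b[1] == b[2] == 0 and b[3] has only bit 4 set,
 * the first three tests fail and the result is 96 + __ffs(b[3]) == 100.
 * The caller guarantees that some bit in the 140-bit map is set, so the
 * final __ffs(b[4]) is only reached with a non-zero word.
 */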
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
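
/*
 * The bit instructions used below (bset/bclr/bchg/btst) operate on a
 * single byte and use the bit number modulo 8.  Bit numbers follow the
 * usual little-endian convention (bit 0 is the LSB of the first long),
 * but the m68k is big-endian, so "(nr ^ 31) >> 3" selects the byte of
 * the 32-bit word that actually contains bit nr: bits 0-7 live in byte 3
 * of the word, bits 24-31 in byte 0.
 */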
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
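
/*
 * When nr is a compile-time constant, __builtin_constant_p(nr) evaluates
 * to 1 and the compiler can fold the word index and mask in
 * __constant_test_bit() down to a single load and test; the run-time
 * arithmetic in __test_bit() is only needed for variable bit numbers.
 */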
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
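
/*
 * As with the other bitmap searches, the return value may be greater than
 * or equal to "size" when no zero bit exists in the range; callers must
 * compare the result against the size they passed in.
 */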
/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
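
/*
 * A typical caller walks every set bit in a map of "nbits" bits with:
 *
 *	for (i = find_first_bit(map, nbits); i < nbits;
 *	     i = find_next_bit(map, nbits, i + 1))
 *		handle_bit(i);
 *
 * where handle_bit() stands for whatever per-bit work the caller does.
 */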
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr) \
	({ \
		int ret; \
		spin_lock(lock); \
		ret = ext2_set_bit((nr), (addr)); \
		spin_unlock(lock); \
		ret; \
	})

#define ext2_clear_bit_atomic(lock, nr, addr) \
	({ \
		int ret; \
		spin_lock(lock); \
		ret = ext2_clear_bit((nr), (addr)); \
		spin_unlock(lock); \
		ret; \
	})
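
/*
 * These are GCC statement expressions: the whole ({ ... }) block evaluates
 * to "ret", so the macros return the previous bit value just like the
 * non-atomic ext2_set_bit()/ext2_clear_bit(), while the caller-supplied
 * spinlock serializes the read-modify-write against other users of the
 * same bitmap.
 */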
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
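
/*
 * ext2 stores its bitmaps as little-endian 32-bit words, so bit 0 is the
 * least significant bit of the first word as a little-endian machine would
 * see it.  On the big-endian m68k that byte sits at offset 3 within the
 * word, which is why every word is run through __swab32() before ffz().
 * For example, a bitmap whose first byte in memory is 0xff and whose
 * second byte is 0xfe reports bit 8 as the first zero bit.
 */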
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

#endif /* _M68KNOMMU_BITOPS_H */