#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H

#ifdef __KERNEL__
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>
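
/*
 * The atomic bit operations below achieve atomicity by disabling local
 * interrupts around the read-modify-write.  That serializes against
 * interrupt handlers and is sufficient on a uniprocessor: nothing else
 * can touch the word between the load and the store.
 */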

static __inline__ void set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a |= mask;
        local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}
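
/*
 * The double-underscore variants here and below are the non-atomic
 * versions: they skip the irq save/restore, so the caller must already
 * have excluded any concurrent modifier of the same word.
 */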

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
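
/*
 * Typical usage sketch (names are illustrative, not part of this API):
 * when clearing a "busy" flag that another context tests, order the
 * clear against the surrounding accesses explicitly:
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(MY_BUSY_BIT, &flags_word);
 *      smp_mb__after_clear_bit();
 */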

static __inline__ void clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a &= ~mask;
        local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a ^= mask;
        local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}

static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result;

        __asm__("1:\n\t"
                "shlr   %1\n\t"
                "bt/s   1b\n\t"
                " add   #1, %0"
                : "=r" (result), "=r" (word)
                : "0" (~0L), "1" (word)
                : "t");
        return result;
}
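
/*
 * How the loop above works: %0 starts at ~0L (-1).  Each "shlr" shifts
 * the word right and moves the old low bit into the T flag; the "add"
 * in the delay slot executes on every iteration, so %0 counts one
 * increment per shift.  "bt/s" loops while T is set, i.e. while the
 * shifted-out bits are 1, so the final count is the index of the first
 * zero bit.  A portable C sketch of the same logic:
 *
 *      unsigned long result = 0;
 *      while (word & 1) {
 *              word >>= 1;
 *              result++;
 *      }
 *      return result;
 */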

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
        unsigned long result;

        __asm__("1:\n\t"
                "shlr   %1\n\t"
                "bf/s   1b\n\t"
                " add   #1, %0"
                : "=r" (result), "=r" (word)
                : "0" (~0L), "1" (word)
                : "t");
        return result;
}
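
/*
 * __ffs is the same loop as ffz with the branch condition inverted
 * ("bf/s" instead of "bt/s"): it loops while the shifted-out bit is 0,
 * so it counts trailing zeros and returns the index of the first set
 * bit.  For example, __ffs(0x8) == 3, just as ffz(0x7) == 3.
 */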

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned int *p = (const unsigned int *) addr + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
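
/*
 * Usage sketch: walk every set bit in a bitmap (names illustrative):
 *
 *      for (bit = find_first_bit(map, nbits);
 *           bit < nbits;
 *           bit = find_next_bit(map, nbits, bit + 1))
 *              handle_bit(bit);
 */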

static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
        const unsigned long *p = (const unsigned long *) addr + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32 - offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
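
/*
 * Usage sketch: find a free slot in an allocation bitmap and claim it
 * (names illustrative):
 *
 *      slot = find_first_zero_bit(map, nslots);
 *      if (slot < nslots)
 *              set_bit(slot, map);
 */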

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)
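
/*
 * Note the off-by-one convention: ffs() is 1-based and returns 0 for a
 * zero argument, while __ffs() is 0-based and undefined for 0.  So
 * ffs(0x8) == 4 but __ffs(0x8) == 3.
 */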

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
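
/*
 * Example: hweight8(0xf0) == 4, hweight32(0xffffffff) == 32.
 */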

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
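
/*
 * b[4] is searched without a preceding test: because at least one of
 * the 140 bits is guaranteed set, reaching the last word implies the
 * set bit lives there, so __ffs (undefined for 0) is safe to call.
 */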

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
        find_next_zero_bit((unsigned long *)(addr), (size), (offset))
#else
  297. #else
  298. static __inline__ int ext2_set_bit(int nr, volatile void * addr)
  299. {
  300. int mask, retval;
  301. unsigned long flags;
  302. volatile unsigned char *ADDR = (unsigned char *) addr;
  303. ADDR += nr >> 3;
  304. mask = 1 << (nr & 0x07);
  305. local_irq_save(flags);
  306. retval = (mask & *ADDR) != 0;
  307. *ADDR |= mask;
  308. local_irq_restore(flags);
  309. return retval;
  310. }
  311. static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
  312. {
  313. int mask, retval;
  314. unsigned long flags;
  315. volatile unsigned char *ADDR = (unsigned char *) addr;
  316. ADDR += nr >> 3;
  317. mask = 1 << (nr & 0x07);
  318. local_irq_save(flags);
  319. retval = (mask & *ADDR) != 0;
  320. *ADDR &= ~mask;
  321. local_irq_restore(flags);
  322. return retval;
  323. }
  324. static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
  325. {
  326. int mask;
  327. const volatile unsigned char *ADDR = (const unsigned char *) addr;
  328. ADDR += nr >> 3;
  329. mask = 1 << (nr & 0x07);
  330. return ((mask & *ADDR) != 0);
  331. }

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* tmp holds the little-endian value, for which the
                 * plain shift below would be wrong.  We could keep a
                 * big-endian value in tmp instead, like this:
                 *
                 *      tmp = __swab32(*(p++));
                 *      tmp |= ~0UL >> (32 - offset);
                 *
                 * but that would cost a runtime swab of the loaded
                 * word, while swabbing the constant mask is folded at
                 * compile time, so we swab the mask instead:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32 - offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above.  But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
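
/*
 * Worked example of the mask swab above, assuming offset == 8 on a
 * big-endian CPU: ~0UL >> 24 is 0x000000ff, i.e. "bits 0-7 already
 * scanned" in little-endian bit numbering.  Those bits live in byte 0
 * of the bitmap, which a big-endian load places in the top byte of the
 * word, so the mask to OR in is __swab32(0x000000ff) == 0xff000000.
 */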

#endif

#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })

#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })
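
/*
 * Usage sketch (names illustrative): callers pass the spinlock that
 * protects the bitmap, typically a per-block-group lock:
 *
 *      if (!ext2_set_bit_atomic(&grp_lock, bit, bitmap))
 *              ...     // bit was previously clear: we claimed it
 */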

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)
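
/*
 * Like ffs(), fls() is 1-based and returns 0 for a zero argument:
 * fls(0x8) == 4, fls(1) == 1, fls(0) == 0.
 */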

#endif /* __KERNEL__ */
#endif /* __ASM_SH_BITOPS_H */