#ifndef _H8300_BITOPS_H
#define _H8300_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 * Copyright 2002, Yoshinori Sato
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>

#ifdef __KERNEL__

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result;

	result = -1;
	__asm__("1:\n\t"
		"shlr.l %2\n\t"
		"adds #1,%0\n\t"
		"bcs 1b"
		: "=r" (result)
		: "0" (result), "r" (word));
	return result;
}
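/*
 * How the loop above works: shlr.l shifts the low bit of the word
 * into the carry flag, and bcs loops while that bit is 1, so the
 * count stops at the first zero bit. A rough C equivalent, for
 * reference only (the asm above is what actually compiles):
 *
 *	unsigned long result = 0;
 *	while (word & 1) {
 *		word >>= 1;
 *		result++;
 *	}
 *	return result;
 */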
#define H8300_GEN_BITOP_CONST(OP,BIT)					\
	case BIT:							\
		__asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory");	\
		break;

#define H8300_GEN_BITOP(FNAME,OP)					\
static __inline__ void FNAME(int nr, volatile unsigned long* addr)	\
{									\
	volatile unsigned char *b_addr;					\
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);	\
	if (__builtin_constant_p(nr)) {					\
		switch(nr & 7) {					\
			H8300_GEN_BITOP_CONST(OP,0)			\
			H8300_GEN_BITOP_CONST(OP,1)			\
			H8300_GEN_BITOP_CONST(OP,2)			\
			H8300_GEN_BITOP_CONST(OP,3)			\
			H8300_GEN_BITOP_CONST(OP,4)			\
			H8300_GEN_BITOP_CONST(OP,5)			\
			H8300_GEN_BITOP_CONST(OP,6)			\
			H8300_GEN_BITOP_CONST(OP,7)			\
		}							\
	} else {							\
		__asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory");	\
	}								\
}
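/*
 * Addressing note: the H8/300 bit instructions (bset, bclr, bnot,
 * btst, bld) operate on a single byte, while the generic bitops API
 * numbers bits within little-endian longs. On this big-endian CPU the
 * byte holding bit nr of a long is at offset (nr >> 3) ^ 3. For
 * example, nr = 9 falls in byte 1 of the little-endian view, which is
 * stored at byte 1 ^ 3 = 2 of the big-endian long in memory.
 */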
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

H8300_GEN_BITOP(set_bit   ,"bset")
H8300_GEN_BITOP(clear_bit ,"bclr")
H8300_GEN_BITOP(change_bit,"bnot")
#define __set_bit(nr,addr)    set_bit((nr),(addr))
#define __clear_bit(nr,addr)  clear_bit((nr),(addr))
#define __change_bit(nr,addr) change_bit((nr),(addr))

#undef H8300_GEN_BITOP
#undef H8300_GEN_BITOP_CONST

static __inline__ int test_bit(int nr, const unsigned long* addr)
{
	return (*((volatile unsigned char *)addr +
		  ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
}

#define __test_bit(nr, addr) test_bit(nr, addr)
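/*
 * test_bit() is a plain byte load with no read-modify-write cycle, so
 * the "non-atomic" __test_bit() above can simply alias it.
 */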
#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT)			     \
	case BIT:						     \
		__asm__("stc ccr,%w1\n\t"			     \
			"orc #0x80,ccr\n\t"			     \
			"bld #" #BIT ",@%4\n\t"			     \
			OP " #" #BIT ",@%4\n\t"			     \
			"rotxl.l %0\n\t"			     \
			"ldc %w1,ccr"				     \
			: "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)  \
			: "0" (retval),"r" (b_addr)		     \
			: "memory");				     \
		break;

#define H8300_GEN_TEST_BITOP_CONST(OP,BIT)			     \
	case BIT:						     \
		__asm__("bld #" #BIT ",@%3\n\t"			     \
			OP " #" #BIT ",@%3\n\t"			     \
			"rotxl.l %0\n\t"			     \
			: "=r"(retval),"=m"(*b_addr)		     \
			: "0" (retval),"r" (b_addr)		     \
			: "memory");				     \
		break;

#define H8300_GEN_TEST_BITOP(FNNAME,OP)				     \
static __inline__ int FNNAME(int nr, volatile void * addr)	     \
{								     \
	int retval = 0;						     \
	char ccrsave;						     \
	volatile unsigned char *b_addr;				     \
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);  \
	if (__builtin_constant_p(nr)) {				     \
		switch(nr & 7) {				     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,0)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,1)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,2)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,3)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,4)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,5)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,6)	     \
			H8300_GEN_TEST_BITOP_CONST_INT(OP,7)	     \
		}						     \
	} else {						     \
		__asm__("stc ccr,%w1\n\t"			     \
			"orc #0x80,ccr\n\t"			     \
			"btst %w5,@%4\n\t"			     \
			OP " %w5,@%4\n\t"			     \
			"beq 1f\n\t"				     \
			"inc.l #1,%0\n"				     \
			"1:\n\t"				     \
			"ldc %w1,ccr"				     \
			: "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)  \
			: "0" (retval),"r" (b_addr),"r"(nr)	     \
			: "memory");				     \
	}							     \
	return retval;						     \
}								     \
								     \
static __inline__ int __ ## FNNAME(int nr, volatile void * addr)    \
{								     \
	int retval = 0;						     \
	volatile unsigned char *b_addr;				     \
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);  \
	if (__builtin_constant_p(nr)) {				     \
		switch(nr & 7) {				     \
			H8300_GEN_TEST_BITOP_CONST(OP,0)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,1)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,2)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,3)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,4)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,5)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,6)	     \
			H8300_GEN_TEST_BITOP_CONST(OP,7)	     \
		}						     \
	} else {						     \
		__asm__("btst %w4,@%3\n\t"			     \
			OP " %w4,@%3\n\t"			     \
			"beq 1f\n\t"				     \
			"inc.l #1,%0\n"				     \
			"1:"					     \
			: "=r"(retval),"=m"(*b_addr)		     \
			: "0" (retval),"r" (b_addr),"r"(nr)	     \
			: "memory");				     \
	}							     \
	return retval;						     \
}
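/*
 * Each instantiation below emits two functions. The plain name is the
 * atomic variant: it saves CCR and sets the interrupt mask bit
 * ("orc #0x80,ccr") around the read-modify-write, then restores CCR.
 * The __-prefixed variant skips the masking for callers that already
 * guarantee exclusion.
 */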
H8300_GEN_TEST_BITOP(test_and_set_bit,	  "bset")
H8300_GEN_TEST_BITOP(test_and_clear_bit,  "bclr")
H8300_GEN_TEST_BITOP(test_and_change_bit, "bnot")
#undef H8300_GEN_TEST_BITOP_CONST
#undef H8300_GEN_TEST_BITOP_CONST_INT
#undef H8300_GEN_TEST_BITOP

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#define ffs(x) generic_ffs(x)

static __inline__ unsigned long __ffs(unsigned long word)
{
	unsigned long result;

	result = -1;
	__asm__("1:\n\t"
		"shlr.l %2\n\t"
		"adds #1,%0\n\t"
		"bcc 1b"
		: "=r" (result)
		: "0" (result), "r" (word));
	return result;
}
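/*
 * __ffs() mirrors ffz() above: the loop branches while the carry is
 * clear (bcc), skipping zero bits, so it returns the index of the
 * first set bit. As with ffz(), the result is undefined for an
 * all-zero word.
 */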
static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
	unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
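/*
 * The scan works one 32-bit word at a time: bits below `offset` in
 * the first word are forced to 1 so they cannot match, all-ones words
 * (~tmp == 0) are skipped, and bits past `size` in the last word are
 * forced to 1 before the word is handed to ffz(). A typical use, with
 * a hypothetical caller-defined bitmap[]:
 *
 *	unsigned long bitmap[4];		// 128-bit bitmap
 *	int bit = find_first_zero_bit(bitmap, 128);
 *	if (bit < 128)
 *		set_bit(bit, bitmap);		// claim the free slot
 */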
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)
		return result + size;
found_middle:
	return result + __ffs(tmp);
}

#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
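/*
 * The 140 bits span five 32-bit words: b[0] covers bits 0-31, b[1]
 * bits 32-63, and so on up to bits 128-139 in b[4]. The unlikely()
 * hints bias the generated branches toward the high-numbered words,
 * where a set bit is expected.
 */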
/*
 * hweightN: returns the Hamming weight (i.e. the number of bits set)
 * of an N-bit word. For example, hweight8(0xCB) is 5, since
 * 0xCB = 11001011 has five bits set.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
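/*
 * The ext2_* operations below implement the little-endian bitmap
 * layout that the ext2 on-disk format requires regardless of host
 * byte order: bit nr lives in byte (nr >> 3), least significant bit
 * first. Working byte-wise sidesteps the big-endian long layout used
 * by the native operations above; atomicity comes from masking
 * interrupts, which suffices on this uniprocessor architecture.
 */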
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr)

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_clear_bit_atomic(lock, nr, addr) ext2_clear_bit(nr, addr)
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little-endian value in tmp, but then the
		 * shift is illegal. So we could keep a big-endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little-endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)

#endif /* _H8300_BITOPS_H */