/*
 * bitops.h - MicroBlaze bit operations.
 * (Extraction banner and line-number gutter removed as non-code residue.)
 */
  1. #ifndef _MICROBLAZE_BITOPS_H
  2. #define _MICROBLAZE_BITOPS_H
  3. /*
  4. * Copyright 1992, Linus Torvalds.
  5. */
  6. #include <linux/config.h>
  7. #include <asm/byteorder.h> /* swab32 */
  8. #include <asm/system.h> /* save_flags */
  9. #ifdef __KERNEL__
  10. /*
  11. * Function prototypes to keep gcc -Wall happy
  12. */
  13. /*
  14. * The __ functions are not atomic
  15. */
  16. extern void set_bit(int nr, volatile void * addr);
  17. extern void __set_bit(int nr, volatile void * addr);
  18. extern void clear_bit(int nr, volatile void * addr);
  19. #define __clear_bit(nr, addr) clear_bit(nr, addr)
  20. extern void change_bit(int nr, volatile void * addr);
  21. extern void __change_bit(int nr, volatile void * addr);
  22. extern int test_and_set_bit(int nr, volatile void * addr);
  23. extern int __test_and_set_bit(int nr, volatile void * addr);
  24. extern int test_and_clear_bit(int nr, volatile void * addr);
  25. extern int __test_and_clear_bit(int nr, volatile void * addr);
  26. extern int test_and_change_bit(int nr, volatile void * addr);
  27. extern int __test_and_change_bit(int nr, volatile void * addr);
  28. extern int __constant_test_bit(int nr, const volatile void * addr);
  29. extern int __test_bit(int nr, volatile void * addr);
  30. extern int find_first_zero_bit(void * addr, unsigned size);
  31. extern int find_next_zero_bit (void * addr, int size, int offset);
  32. /*
  33. * ffz = Find First Zero in word. Undefined if no zero exists,
  34. * so code should check against ~0UL first..
  35. */
  36. extern __inline__ unsigned long ffz(unsigned long word)
  37. {
  38. unsigned long result = 0;
  39. while(word & 1) {
  40. result++;
  41. word >>= 1;
  42. }
  43. return result;
  44. }
  45. extern __inline__ void set_bit(int nr, volatile void * addr)
  46. {
  47. int * a = (int *) addr;
  48. int mask;
  49. unsigned long flags;
  50. a += nr >> 5;
  51. mask = 1 << (nr & 0x1f);
  52. save_flags_cli(flags);
  53. *a |= mask;
  54. restore_flags(flags);
  55. }
  56. extern __inline__ void __set_bit(int nr, volatile void * addr)
  57. {
  58. int * a = (int *) addr;
  59. int mask;
  60. a += nr >> 5;
  61. mask = 1 << (nr & 0x1f);
  62. *a |= mask;
  63. }
  64. /*
  65. * clear_bit() doesn't provide any barrier for the compiler.
  66. */
  67. #define smp_mb__before_clear_bit() barrier()
  68. #define smp_mb__after_clear_bit() barrier()
  69. extern __inline__ void clear_bit(int nr, volatile void * addr)
  70. {
  71. int * a = (int *) addr;
  72. int mask;
  73. unsigned long flags;
  74. a += nr >> 5;
  75. mask = 1 << (nr & 0x1f);
  76. save_flags_cli(flags);
  77. *a &= ~mask;
  78. restore_flags(flags);
  79. }
  80. extern __inline__ void change_bit(int nr, volatile void * addr)
  81. {
  82. int mask;
  83. unsigned long flags;
  84. unsigned long *ADDR = (unsigned long *) addr;
  85. ADDR += nr >> 5;
  86. mask = 1 << (nr & 31);
  87. save_flags_cli(flags);
  88. *ADDR ^= mask;
  89. restore_flags(flags);
  90. }
  91. extern __inline__ void __change_bit(int nr, volatile void * addr)
  92. {
  93. int mask;
  94. unsigned long *ADDR = (unsigned long *) addr;
  95. ADDR += nr >> 5;
  96. mask = 1 << (nr & 31);
  97. *ADDR ^= mask;
  98. }
  99. extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
  100. {
  101. int mask, retval;
  102. volatile unsigned int *a = (volatile unsigned int *) addr;
  103. unsigned long flags;
  104. a += nr >> 5;
  105. mask = 1 << (nr & 0x1f);
  106. save_flags_cli(flags);
  107. retval = (mask & *a) != 0;
  108. *a |= mask;
  109. restore_flags(flags);
  110. return retval;
  111. }
  112. extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
  113. {
  114. int mask, retval;
  115. volatile unsigned int *a = (volatile unsigned int *) addr;
  116. a += nr >> 5;
  117. mask = 1 << (nr & 0x1f);
  118. retval = (mask & *a) != 0;
  119. *a |= mask;
  120. return retval;
  121. }
  122. extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  123. {
  124. int mask, retval;
  125. volatile unsigned int *a = (volatile unsigned int *) addr;
  126. unsigned long flags;
  127. a += nr >> 5;
  128. mask = 1 << (nr & 0x1f);
  129. save_flags_cli(flags);
  130. retval = (mask & *a) != 0;
  131. *a &= ~mask;
  132. restore_flags(flags);
  133. return retval;
  134. }
  135. extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
  136. {
  137. int mask, retval;
  138. volatile unsigned int *a = (volatile unsigned int *) addr;
  139. a += nr >> 5;
  140. mask = 1 << (nr & 0x1f);
  141. retval = (mask & *a) != 0;
  142. *a &= ~mask;
  143. return retval;
  144. }
  145. extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
  146. {
  147. int mask, retval;
  148. volatile unsigned int *a = (volatile unsigned int *) addr;
  149. unsigned long flags;
  150. a += nr >> 5;
  151. mask = 1 << (nr & 0x1f);
  152. save_flags_cli(flags);
  153. retval = (mask & *a) != 0;
  154. *a ^= mask;
  155. restore_flags(flags);
  156. return retval;
  157. }
  158. extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
  159. {
  160. int mask, retval;
  161. volatile unsigned int *a = (volatile unsigned int *) addr;
  162. a += nr >> 5;
  163. mask = 1 << (nr & 0x1f);
  164. retval = (mask & *a) != 0;
  165. *a ^= mask;
  166. return retval;
  167. }
  168. /*
  169. * This routine doesn't need to be atomic.
  170. */
  171. extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
  172. {
  173. return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  174. }
  175. extern __inline__ int __test_bit(int nr, volatile void * addr)
  176. {
  177. int * a = (int *) addr;
  178. int mask;
  179. a += nr >> 5;
  180. mask = 1 << (nr & 0x1f);
  181. return ((mask & *a) != 0);
  182. }
/*
 * test_bit - dispatch to __constant_test_bit() when @nr is a
 * compile-time constant, otherwise to __test_bit().  Both return 0 or
 * 1; neither is atomic.
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))

/* Find the first zero bit: a next-zero search starting at offset 0. */
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
  189. extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
  190. {
  191. unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
  192. unsigned long result = offset & ~31UL;
  193. unsigned long tmp;
  194. if (offset >= size)
  195. return size;
  196. size -= result;
  197. offset &= 31UL;
  198. if (offset) {
  199. tmp = *(p++);
  200. tmp |= ~0UL >> (32-offset);
  201. if (size < 32)
  202. goto found_first;
  203. if (~tmp)
  204. goto found_middle;
  205. size -= 32;
  206. result += 32;
  207. }
  208. while (size & ~31UL) {
  209. if (~(tmp = *(p++)))
  210. goto found_middle;
  211. result += 32;
  212. size -= 32;
  213. }
  214. if (!size)
  215. return result;
  216. tmp = *p;
  217. found_first:
  218. tmp |= ~0UL >> size;
  219. found_middle:
  220. return result + ffz(tmp);
  221. }
/* ffs - find first set bit (1-based, 0 if none); generic C fallback. */
#define ffs(x) generic_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word; generic C fallbacks.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
  230. extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
  231. {
  232. int mask, retval;
  233. unsigned long flags;
  234. volatile unsigned char *ADDR = (unsigned char *) addr;
  235. ADDR += nr >> 3;
  236. mask = 1 << (nr & 0x07);
  237. save_flags_cli(flags);
  238. retval = (mask & *ADDR) != 0;
  239. *ADDR |= mask;
  240. restore_flags(flags);
  241. return retval;
  242. }
  243. extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
  244. {
  245. int mask, retval;
  246. unsigned long flags;
  247. volatile unsigned char *ADDR = (unsigned char *) addr;
  248. ADDR += nr >> 3;
  249. mask = 1 << (nr & 0x07);
  250. save_flags_cli(flags);
  251. retval = (mask & *ADDR) != 0;
  252. *ADDR &= ~mask;
  253. restore_flags(flags);
  254. return retval;
  255. }
  256. extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
  257. {
  258. int mask;
  259. const volatile unsigned char *ADDR = (const unsigned char *) addr;
  260. ADDR += nr >> 3;
  261. mask = 1 << (nr & 0x07);
  262. return ((mask & *ADDR) != 0);
  263. }
/* ext2 bitmaps are little-endian byte streams; first-zero search is a
 * next-zero search from offset 0. */
#define ext2_find_first_zero_bit(addr, size) \
ext2_find_next_zero_bit((addr), (size), 0)

/*
 * ext2_find_next_zero_bit - find the first zero bit at or after @offset
 * in a little-endian (ext2-order) bitmap of @size bits.  Returns an
 * index >= @size when no zero bit exists.  Words are read in native
 * order and byte-swapped (__swab32) only where bit numbering matters,
 * so the fast whole-word "any zero bit?" scan needs no swap.
 */
extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if(offset) {
/* We hold the little endian value in tmp, but then the
 * shift is illegal. So we could keep a big endian value
 * in tmp, like this:
 *
 * tmp = __swab32(*(p++));
 * tmp |= ~0UL >> (32-offset);
 *
 * but this would decrease performance, so we change the
 * shift:
 */
tmp = *(p++);
tmp |= __swab32(~0UL >> (32-offset));
if(size < 32)
goto found_first;
if(~tmp)
goto found_middle;
size -= 32;
result += 32;
}
/* Whole-word scan: a zero bit is a zero bit in any byte order. */
while(size & ~31UL) {
if(~(tmp = *(p++)))
goto found_middle;
result += 32;
size -= 32;
}
if(!size)
return result;
tmp = *p;
found_first:
/* tmp is little endian, so we would have to swab the shift,
 * see above. But then we have to swab tmp below for ffz, so
 * we might as well do this here.
 */
return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
return result + ffz(__swab32(tmp));
}
/* Bitmap functions for the minix filesystem.  These map straight onto
 * the native-order bit operations defined above. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 *
 * NOTE(review): these three defines repeat, token for token, the
 * definitions earlier in this header; the identical redefinition is
 * legal C but one copy could be dropped.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
  328. #endif /* __KERNEL__ */
  329. #endif /* _MICROBLAZE_BITOPS_H */