bitops.h

#ifndef _MICROBLAZE_BITOPS_H
#define _MICROBLAZE_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

/*
 * Function prototypes to keep gcc -Wall happy
 */

/*
 * The __ functions are not atomic
 */
extern void set_bit(int nr, volatile void * addr);
extern void __set_bit(int nr, volatile void * addr);
extern void clear_bit(int nr, volatile void * addr);
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

extern void change_bit(int nr, volatile void * addr);
extern void __change_bit(int nr, volatile void * addr);
extern int test_and_set_bit(int nr, volatile void * addr);
extern int __test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int __test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
extern int __test_and_change_bit(int nr, volatile void * addr);
extern int __constant_test_bit(int nr, const volatile void * addr);
extern int __test_bit(int nr, volatile void * addr);
extern int find_first_zero_bit(void * addr, unsigned size);
extern int find_next_zero_bit(void * addr, int size, int offset);

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
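/*
 * Illustrative sketch, not part of the original header: the comment above
 * warns that ffz() is undefined when every bit is set, so callers are
 * expected to test against ~0UL first. The helper name below is made up
 * and only demonstrates that calling convention.
 */
static __inline__ int example_first_free_slot(unsigned long word)
{
	if (word == ~0UL)		/* no zero bit: ffz() would be undefined */
		return -1;
	return (int) ffz(word);		/* index of the first clear bit */
}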
extern __inline__ void set_bit(int nr, volatile void * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a |= mask;
	restore_flags(flags);
}

extern __inline__ void __set_bit(int nr, volatile void * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
#define PLATFORM__SET_BIT
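/*
 * Illustrative sketch, not part of the original header: as noted above, the
 * double-underscore variants are not atomic and rely on the caller's own
 * exclusion. The hypothetical helper below sets two bits under one
 * interrupt save/restore instead of paying for it twice via set_bit().
 */
static __inline__ void example_set_two_bits(int nr1, int nr2, volatile void * addr)
{
	unsigned long flags;

	save_flags_cli(flags);		/* caller-level exclusion covers both updates */
	__set_bit(nr1, addr);		/* non-atomic variant is safe here */
	__set_bit(nr2, addr);
	restore_flags(flags);
}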
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

extern __inline__ void clear_bit(int nr, volatile void * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a &= ~mask;
	restore_flags(flags);
}
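/*
 * Illustrative sketch, not part of the original header: because clear_bit()
 * is not a compiler barrier, callers that need ordering around the clear
 * wrap it with the helper macros above. The bit number and helper name
 * below are hypothetical.
 */
#define EXAMPLE_PENDING_BIT	0
static __inline__ void example_ack_pending(volatile void * status)
{
	smp_mb__before_clear_bit();	/* order earlier stores before the clear */
	clear_bit(EXAMPLE_PENDING_BIT, status);
	smp_mb__after_clear_bit();	/* order the clear before later accesses */
}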
extern __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	save_flags_cli(flags);
	*ADDR ^= mask;
	restore_flags(flags);
}

extern __inline__ void __change_bit(int nr, volatile void * addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

extern __inline__ int __test_bit(int nr, volatile void * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

extern __inline__ int find_next_zero_bit(void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* force the bits beyond 'size' to look set */
found_middle:
	return result + ffz(tmp);
}
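/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller allocating the first free entry in a fixed-size bitmap with the
 * interface above. EXAMPLE_MAP_BITS and the helper name are made up, and
 * the caller is assumed to serialize concurrent allocations.
 */
#define EXAMPLE_MAP_BITS	64
static __inline__ int example_alloc_entry(unsigned long *map)
{
	int bit = find_first_zero_bit(map, EXAMPLE_MAP_BITS);

	if (bit >= EXAMPLE_MAP_BITS)	/* no clear bit: the map is full */
		return -1;
	set_bit(bit, map);		/* claim the slot */
	return bit;
}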
extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
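/*
 * Illustrative note, not part of the original header: the trick described
 * in the comments above works because byte-swapping distributes over OR
 * and is its own inverse, i.e. for any word and mask
 *
 *	__swab32(tmp | __swab32(mask)) == __swab32(tmp) | mask
 *
 * so or-ing a pre-swapped mask into the raw little-endian word and swapping
 * only once at the end (for ffz) yields the same bit index as swapping the
 * word up front.
 */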
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
#endif /* __KERNEL__ */
#endif /* _MICROBLAZE_BITOPS_H */