/*
 * include/asm-xtensa/bitops.h
 *
 * Atomic operations that C can't guarantee us. Useful for resource counting
 * etc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
# error SMP not supported on this architecture
#endif
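
/*
 * This port is uniprocessor-only (see the CONFIG_SMP check above), so the
 * "atomic" bit operations below get their atomicity by disabling local
 * interrupts around a plain read-modify-write: nr >> 5 selects the 32-bit
 * word within the bitmap, and nr & 0x1f the bit within that word.
 */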

static __inline__ void set_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}
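
/*
 * The double-underscore variants below are the non-atomic versions: the
 * caller must already guarantee exclusive access to the bitmap, e.g.
 * during initialization or under a lock.
 */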

static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a |= mask;
}

static __inline__ void clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a &= ~mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

static __inline__ void change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a ^= mask;
}
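
/*
 * The test_and_*() routines return the previous value of the bit:
 * nonzero if the bit was set before the operation, zero otherwise.
 */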

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old & ~mask;
	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

/*
 * non-atomic version; can be reordered
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old ^ mask;
	return (old & mask) != 0;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return 1UL & (((const volatile unsigned int *)addr)[nr >> 5] >> (nr & 31));
}
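
/*
 * __cntlz() returns the bit index of the most-significant set bit in x
 * (i.e. 31 minus the number of leading zeros), or -1 if x is zero.
 */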
#if XCHAL_HAVE_NSA
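
/*
 * The NSAU instruction ("normalization shift amount, unsigned") counts
 * the leading zeros of its operand and yields 32 for a zero operand,
 * so 31 - lz is the bit index of the most-significant set bit.
 */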
static __inline__ int __cntlz (unsigned long x)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}

#else

static __inline__ int __cntlz (unsigned long x)
{
	int sum = -1;	/* matches 31 - NSAU(0) for a zero argument */

	/* Binary search for the most-significant set bit, narrowing
	 * from a 16-bit half down to a single bit.
	 */
	if (x) {
		sum = 0;
		if (x & 0xFFFF0000) { sum += 16; x >>= 16; }
		if (x & 0x0000FF00) { sum += 8; x >>= 8; }
		if (x & 0x000000F0) { sum += 4; x >>= 4; }
		if (x & 0x0000000C) { sum += 2; x >>= 2; }
		if (x & 0x00000002) { sum += 1; }
	}
	return sum;
}

#endif

/*
 * ffz: Find first zero in word. Undefined if no zero exists.
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
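
/*
 * In two's complement, x & -x isolates the least-significant set bit,
 * so __cntlz() of that single-bit value is exactly its bit index. The
 * same trick is used by __ffs() and ffs() below.
 */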
static __inline__ int ffz(unsigned long x)
{
	if ((x = ~x) == 0)
		return 32;
	return __cntlz(x & -x);
}

/*
 * __ffs: Find first bit set in word. Return 0 for bit 0
 */
static __inline__ int __ffs(unsigned long x)
{
	return __cntlz(x & -x);
}

/*
 * ffs: Find first bit set in word. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(unsigned long x)
{
	return __cntlz(x & -x) + 1;
}

/*
 * fls: Find last (most-significant) bit set in word.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls (unsigned int x)
{
	return __cntlz(x) + 1;
}
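
/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: the address to base the search on
 * @size: the bitmap size in bits
 * @offset: the bit number to start searching at
 */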
static __inline__ int
find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
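
/*
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: the address to base the search on
 * @size: the bitmap size in bits
 * @offset: the bit number to start searching at
 */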
static __inline__ int
find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *p++))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#ifdef __XTENSA_EL__
# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
# define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))
#elif defined(__XTENSA_EB__)
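
/*
 * ext2 bitmaps are little-endian on disk. On a big-endian core the bit
 * number is remapped with XOR 0x18 (24), which mirrors the byte index
 * within the 32-bit word (0<->3, 1<->2) while keeping the bit position
 * inside the byte unchanged.
 */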
# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
# define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#else
# error processor byte order undefined!
#endif
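
/* Hamming weight (number of set bits), via the generic helpers. */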
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

/*
 * Find the first bit set in a 140-bit bitmap.
 * The first 100 bits are unlikely to be set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif	/* __KERNEL__ */
#endif	/* _XTENSA_BITOPS_H */