bitops.h 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350
  1. /*
  2. * PowerPC atomic bit operations.
  3. *
  4. * Merged version by David Gibson <david@gibson.dropbear.id.au>.
  5. * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
  6. * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
  7. * originally took it from the ppc32 code.
  8. *
* Within a word, bits are numbered LSB first. Lots of places make
  10. * this assumption by directly testing bits with (val & (1<<nr)).
  11. * This can cause confusion for large (> 1 word) bitmaps on a
  12. * big-endian system because, unlike little endian, the number of each
  13. * bit depends on the word size.
  14. *
  15. * The bitop functions are defined to work on unsigned longs, so for a
  16. * ppc64 system the bits end up numbered:
* |63..............0|127............64|191...........128|255...........192|
  18. * and on ppc32:
* |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
  20. *
  21. * There are a few little-endian macros used mostly for filesystem
  22. * bitmaps, these work on similar bit arrays layouts, but
  23. * byte-oriented:
  24. * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
  25. *
  26. * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
  27. * number field needs to be reversed compared to the big-endian bit
  28. * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
  29. *
  30. * This program is free software; you can redistribute it and/or
  31. * modify it under the terms of the GNU General Public License
  32. * as published by the Free Software Foundation; either version
  33. * 2 of the License, or (at your option) any later version.
  34. */
  35. #ifndef _ASM_POWERPC_BITOPS_H
  36. #define _ASM_POWERPC_BITOPS_H
  37. #ifdef __KERNEL__
  38. #include <linux/compiler.h>
  39. #include <asm/asm-compat.h>
  40. #include <asm/synch.h>
  41. /*
  42. * clear_bit doesn't imply a memory barrier
  43. */
  44. #define smp_mb__before_clear_bit() smp_mb()
  45. #define smp_mb__after_clear_bit() smp_mb()
  46. #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  47. #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
  48. #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
  49. static __inline__ void set_bit(int nr, volatile unsigned long *addr)
  50. {
  51. unsigned long old;
  52. unsigned long mask = BITOP_MASK(nr);
  53. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  54. __asm__ __volatile__(
  55. "1:" PPC_LLARX "%0,0,%3 # set_bit\n"
  56. "or %0,%0,%2\n"
  57. PPC405_ERR77(0,%3)
  58. PPC_STLCX "%0,0,%3\n"
  59. "bne- 1b"
  60. : "=&r" (old), "+m" (*p)
  61. : "r" (mask), "r" (p)
  62. : "cc" );
  63. }
  64. static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
  65. {
  66. unsigned long old;
  67. unsigned long mask = BITOP_MASK(nr);
  68. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  69. __asm__ __volatile__(
  70. "1:" PPC_LLARX "%0,0,%3 # clear_bit\n"
  71. "andc %0,%0,%2\n"
  72. PPC405_ERR77(0,%3)
  73. PPC_STLCX "%0,0,%3\n"
  74. "bne- 1b"
  75. : "=&r" (old), "+m" (*p)
  76. : "r" (mask), "r" (p)
  77. : "cc" );
  78. }
  79. static __inline__ void change_bit(int nr, volatile unsigned long *addr)
  80. {
  81. unsigned long old;
  82. unsigned long mask = BITOP_MASK(nr);
  83. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  84. __asm__ __volatile__(
  85. "1:" PPC_LLARX "%0,0,%3 # change_bit\n"
  86. "xor %0,%0,%2\n"
  87. PPC405_ERR77(0,%3)
  88. PPC_STLCX "%0,0,%3\n"
  89. "bne- 1b"
  90. : "=&r" (old), "+m" (*p)
  91. : "r" (mask), "r" (p)
  92. : "cc" );
  93. }
  94. static __inline__ int test_and_set_bit(unsigned long nr,
  95. volatile unsigned long *addr)
  96. {
  97. unsigned long old, t;
  98. unsigned long mask = BITOP_MASK(nr);
  99. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  100. __asm__ __volatile__(
  101. LWSYNC_ON_SMP
  102. "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
  103. "or %1,%0,%2 \n"
  104. PPC405_ERR77(0,%3)
  105. PPC_STLCX "%1,0,%3 \n"
  106. "bne- 1b"
  107. ISYNC_ON_SMP
  108. : "=&r" (old), "=&r" (t)
  109. : "r" (mask), "r" (p)
  110. : "cc", "memory");
  111. return (old & mask) != 0;
  112. }
  113. static __inline__ int test_and_clear_bit(unsigned long nr,
  114. volatile unsigned long *addr)
  115. {
  116. unsigned long old, t;
  117. unsigned long mask = BITOP_MASK(nr);
  118. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  119. __asm__ __volatile__(
  120. LWSYNC_ON_SMP
  121. "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
  122. "andc %1,%0,%2 \n"
  123. PPC405_ERR77(0,%3)
  124. PPC_STLCX "%1,0,%3 \n"
  125. "bne- 1b"
  126. ISYNC_ON_SMP
  127. : "=&r" (old), "=&r" (t)
  128. : "r" (mask), "r" (p)
  129. : "cc", "memory");
  130. return (old & mask) != 0;
  131. }
  132. static __inline__ int test_and_change_bit(unsigned long nr,
  133. volatile unsigned long *addr)
  134. {
  135. unsigned long old, t;
  136. unsigned long mask = BITOP_MASK(nr);
  137. unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
  138. __asm__ __volatile__(
  139. LWSYNC_ON_SMP
  140. "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
  141. "xor %1,%0,%2 \n"
  142. PPC405_ERR77(0,%3)
  143. PPC_STLCX "%1,0,%3 \n"
  144. "bne- 1b"
  145. ISYNC_ON_SMP
  146. : "=&r" (old), "=&r" (t)
  147. : "r" (mask), "r" (p)
  148. : "cc", "memory");
  149. return (old & mask) != 0;
  150. }
/*
 * set_bits - atomically OR a whole mask into one word of a bitmap
 * @mask: bits to set
 * @addr: the word to update
 *
 * Like set_bit() but sets every bit in @mask in a single atomic
 * larx/stcx. sequence.  No memory barrier is implied.
 *
 * NOTE(review): unlike every other larx/stcx. loop in this file, the
 * PPC405_ERR77 erratum workaround is missing before the
 * store-conditional here — confirm whether that is intentional.
 */
static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
{
	unsigned long old;

	__asm__ __volatile__(
	"1:" PPC_LLARX "%0,0,%3 # set_bits\n"
	"or %0,%0,%2\n"
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*addr)
	: "r" (mask), "r" (addr)
	: "cc");
}
  163. #include <asm-generic/bitops/non-atomic.h>
  164. /*
  165. * Return the zero-based bit position (LE, not IBM bit numbering) of
  166. * the most significant 1-bit in a double word.
  167. */
  168. static __inline__ __attribute__((const))
  169. int __ilog2(unsigned long x)
  170. {
  171. int lz;
  172. asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
  173. return BITS_PER_LONG - 1 - lz;
  174. }
  175. static inline __attribute__((const))
  176. int __ilog2_u32(u32 n)
  177. {
  178. int bit;
  179. asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
  180. return 31 - bit;
  181. }
  182. #ifdef __powerpc64__
  183. static inline __attribute__((const))
  184. int __ilog2_u64(u64 n)
  185. {
  186. int bit;
  187. asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
  188. return 63 - bit;
  189. }
  190. #endif
  191. /*
  192. * Determines the bit position of the least significant 0 bit in the
  193. * specified double word. The returned bit position will be
  194. * zero-based, starting from the right side (63/31 - 0).
  195. */
  196. static __inline__ unsigned long ffz(unsigned long x)
  197. {
  198. /* no zero exists anywhere in the 8 byte area. */
  199. if ((x = ~x) == 0)
  200. return BITS_PER_LONG;
  201. /*
  202. * Calculate the bit position of the least signficant '1' bit in x
  203. * (since x has been changed this will actually be the least signficant
  204. * '0' bit in * the original x). Note: (x & -x) gives us a mask that
  205. * is the least significant * (RIGHT-most) 1-bit of the value in x.
  206. */
  207. return __ilog2(x & -x);
  208. }
  209. static __inline__ int __ffs(unsigned long x)
  210. {
  211. return __ilog2(x & -x);
  212. }
  213. /*
  214. * ffs: find first bit set. This is defined the same way as
  215. * the libc and compiler builtin ffs routines, therefore
  216. * differs in spirit from the above ffz (man ffs).
  217. */
  218. static __inline__ int ffs(int x)
  219. {
  220. unsigned long i = (unsigned long)x;
  221. return __ilog2(i & -i) + 1;
  222. }
  223. /*
  224. * fls: find last (most-significant) bit set.
  225. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  226. */
  227. static __inline__ int fls(unsigned int x)
  228. {
  229. int lz;
  230. asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
  231. return 32 - lz;
  232. }
  233. #include <asm-generic/bitops/fls64.h>
  234. #include <asm-generic/bitops/hweight.h>
  235. #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
  236. unsigned long find_next_zero_bit(const unsigned long *addr,
  237. unsigned long size, unsigned long offset);
  238. /**
  239. * find_first_bit - find the first set bit in a memory region
  240. * @addr: The address to start the search at
  241. * @size: The maximum size to search
  242. *
  243. * Returns the bit-number of the first set bit, not the number of the byte
  244. * containing a bit.
  245. */
  246. #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
  247. unsigned long find_next_bit(const unsigned long *addr,
  248. unsigned long size, unsigned long offset);
  249. /* Little-endian versions */
  250. static __inline__ int test_le_bit(unsigned long nr,
  251. __const__ unsigned long *addr)
  252. {
  253. __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
  254. return (tmp[nr >> 3] >> (nr & 7)) & 1;
  255. }
  256. #define __set_le_bit(nr, addr) \
  257. __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  258. #define __clear_le_bit(nr, addr) \
  259. __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  260. #define test_and_set_le_bit(nr, addr) \
  261. test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  262. #define test_and_clear_le_bit(nr, addr) \
  263. test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  264. #define __test_and_set_le_bit(nr, addr) \
  265. __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  266. #define __test_and_clear_le_bit(nr, addr) \
  267. __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
  268. #define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
  269. unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
  270. unsigned long size, unsigned long offset);
  271. /* Bitmap functions for the ext2 filesystem */
  272. #define ext2_set_bit(nr,addr) \
  273. __test_and_set_le_bit((nr), (unsigned long*)addr)
  274. #define ext2_clear_bit(nr, addr) \
  275. __test_and_clear_le_bit((nr), (unsigned long*)addr)
  276. #define ext2_set_bit_atomic(lock, nr, addr) \
  277. test_and_set_le_bit((nr), (unsigned long*)addr)
  278. #define ext2_clear_bit_atomic(lock, nr, addr) \
  279. test_and_clear_le_bit((nr), (unsigned long*)addr)
  280. #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
  281. #define ext2_find_first_zero_bit(addr, size) \
  282. find_first_zero_le_bit((unsigned long*)addr, size)
  283. #define ext2_find_next_zero_bit(addr, size, off) \
  284. generic_find_next_zero_le_bit((unsigned long*)addr, size, off)
  285. /* Bitmap functions for the minix filesystem. */
  286. #define minix_test_and_set_bit(nr,addr) \
  287. __test_and_set_le_bit(nr, (unsigned long *)addr)
  288. #define minix_set_bit(nr,addr) \
  289. __set_le_bit(nr, (unsigned long *)addr)
  290. #define minix_test_and_clear_bit(nr,addr) \
  291. __test_and_clear_le_bit(nr, (unsigned long *)addr)
  292. #define minix_test_bit(nr,addr) \
  293. test_le_bit(nr, (unsigned long *)addr)
  294. #define minix_find_first_zero_bit(addr,size) \
  295. find_first_zero_le_bit((unsigned long *)addr, size)
  296. #include <asm-generic/bitops/sched.h>
  297. #endif /* __KERNEL__ */
  298. #endif /* _ASM_POWERPC_BITOPS_H */