/* bitops.h */

#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
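
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * BIT_WORD() selects the word of a multi-word bitmap that contains bit @nr,
 * and BIT_MASK() the bit's position within that word. On a 64-bit kernel,
 * bit 70 lives in word 1 under mask 1UL << 6. The helper name below is
 * hypothetical.
 */
#if 0	/* example only */
static inline int example_test_bit(unsigned int nr, const unsigned long *addr)
{
	/* Select the containing word, then test the bit within it. */
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}
#endif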

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but uses the current value of @bit as the starting point */
#define for_each_set_bit_cont(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
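
/*
 * Usage sketch (editor's addition): walk every set bit of a small bitmap.
 * DECLARE_BITMAP(), bitmap_zero(), __set_bit() and pr_info() come from
 * other kernel headers and are assumed available in the including file.
 */
#if 0	/* example only */
static void example_walk_bitmap(void)
{
	DECLARE_BITMAP(map, 128);
	unsigned int bit;

	bitmap_zero(map, 128);
	__set_bit(3, map);
	__set_bit(70, map);

	for_each_set_bit(bit, map, 128)
		pr_info("bit %u is set\n", bit);	/* prints 3, then 70 */
}
#endif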

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
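
/*
 * Worked examples (editor's note, not in the original source):
 * get_count_order() returns the exponent of the enclosing power of two,
 * so get_count_order(16) == 4 and get_count_order(17) == 5. By contrast
 * get_bitmask_order(16) == 5, since fls(16) == 5 (fls() is 1-based).
 */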

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
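
/*
 * Example (editor's note): hweight_long() is a population count, e.g.
 * hweight_long(0xf0UL) == 4. The sizeof() test is resolved at compile
 * time, so only one of hweight32()/hweight64() is ever emitted.
 */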

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
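
/*
 * Caveat (editor's note): each rotate helper above shifts by
 * (width - shift), so a shift of 0 turns into a shift by the full word
 * width, which is undefined behaviour in C. Callers are expected to pass
 * 1 <= shift < width. A sketch of a shift-of-zero-safe variant follows;
 * the name rol32_safe is hypothetical, not a kernel API.
 */
#if 0	/* example only */
static inline __u32 rol32_safe(__u32 word, unsigned int shift)
{
	/* Mask both shift counts so rol32_safe(x, 0) == x with no UB. */
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
#endif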

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 32) of the sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
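
/*
 * Example (editor's note): sign_extend32(0x80, 7) == -128, since bit 7
 * is treated as the sign bit, while sign_extend32(0x7f, 7) == 127.
 */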

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
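
/*
 * Example (editor's note): fls_long(0) == 0 and
 * fls_long(0x80000000UL) == 32, matching the 1-based fls()/fls64()
 * convention for whichever word size the build uses.
 */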

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
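
/*
 * Example (editor's note): __ffs64(0x18) == 3, the 0-based index of the
 * lowest set bit. On a 32-bit kernel, 0x100000000ULL has an all-zero low
 * word, so the search falls through to the high word and returns 32.
 */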

#ifdef __KERNEL__

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif
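
/*
 * Usage sketch (editor's addition): report the highest set bit, treating
 * a return value equal to the bitmap size as "no bits set". The helper
 * name is hypothetical; pr_info() is assumed available.
 */
#if 0	/* example only */
static void example_report_highest(const unsigned long *map,
				   unsigned long nbits)
{
	unsigned long last = find_last_bit(map, nbits);

	if (last == nbits)
		pr_info("no bits set\n");
	else
		pr_info("highest set bit is %lu\n", last);
}
#endif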

#endif /* __KERNEL__ */

#endif /* _LINUX_BITOPS_H */