#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * 32bit archs commonly override this with an optimized implementation,
 * as it is much cheaper there than a full 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
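/*
 * Illustrative usage sketch, not part of the upstream header: a
 * hypothetical helper splitting a nanosecond count into seconds plus
 * leftover nanoseconds, getting quotient and remainder from one divide.
 */
static inline u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
        /* 1000000000 fits in 32 bits, so the 32bit-divisor variant applies */
        return div_u64_rem(ns, 1000000000U, rem_ns);
}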
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
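/*
 * Illustrative sketch, not part of the upstream header: as with C's '/'
 * and '%', division truncates toward zero and the remainder takes the
 * sign of the dividend, e.g. div_s64_rem(-7, 2, &r) returns -3 with
 * r == -1. The helper name below is hypothetical.
 */
static inline s64 example_halve_signed(s64 val, s32 *rem)
{
        return div_s64_rem(val, 2, rem);
}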
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        /* do_div() divides @dividend in place and returns the remainder */
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
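/*
 * Illustrative sketches, not part of the upstream header. First, a full
 * 64bit/64bit divide goes through div64_u64(), which the 64bit branch
 * above inlines and the 32bit branch implements out of line. The helper
 * name is hypothetical.
 */
static inline u64 example_bytes_per_interval(u64 total_bytes, u64 intervals)
{
        return div64_u64(total_bytes, intervals);
}

/*
 * Second, note that do_div() (used in the 32bit div_u64_rem above) is a
 * macro with in-place semantics: it replaces its first argument with the
 * quotient and returns the remainder.
 */
static inline u32 example_do_div(u64 value, u32 divisor)
{
        u32 rem = do_div(value, divisor);  /* value now holds the quotient */
        return rem;
}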
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;

        return div_u64_rem(dividend, divisor, &remainder);
}
#endif
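/*
 * Illustrative sketch, not part of the upstream header: when only the
 * quotient is needed and the divisor fits in 32 bits, div_u64() is the
 * preferred call. The helper name and the 1000 (nanoseconds per
 * microsecond) constant are illustrative.
 */
static inline u64 example_ns_to_usecs(u64 ns)
{
        return div_u64(ns, 1000);
}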
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;

        return div_s64_rem(dividend, divisor, &remainder);
}
#endif
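/*
 * Illustrative sketch, not part of the upstream header: the signed
 * counterpart, e.g. averaging a signed accumulated error over a sample
 * count. The helper name is hypothetical.
 */
static inline s64 example_signed_average(s64 total_error, s32 nr_samples)
{
        return div_s64(total_error, nr_samples);
}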
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

/*
 * Divide by repeated subtraction; only sensible when the quotient is
 * known to be small.
 */
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation.
                 */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}
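/*
 * Illustrative sketch, not part of the upstream header:
 * __iter_div_u64_rem() is worthwhile only when the quotient is known to
 * be small; otherwise a hardware divide is cheaper. The helper name is
 * hypothetical.
 */
static inline u32 example_split_short_delta(u64 delta_ns, u64 *rem_ns)
{
        /* assumes delta_ns spans at most a few seconds */
        return __iter_div_u64_rem(delta_ns, 1000000000U, rem_ns);
}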
#endif /* _LINUX_MATH64_H */