@@ -83,4 +83,23 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 
 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
 
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
 #endif /* _LINUX_MATH64_H */
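
Note (not part of the patch): the empty asm() with a "+rm" constraint tells the
compiler that `dividend` is both read and written by the asm statement, so it
cannot prove the loop is a plain division and collapse it into a single 64-bit
div/mod instruction. Repeated subtraction only pays off when the caller knows
the quotient is small, which is presumably the intended use. Below is a minimal
userspace sketch of the same logic for experimenting with the barrier; the
names iter_div_sketch and the test values are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t
iter_div_sketch(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		/* Same trick as the patch: without this barrier the
		   compiler may turn the loop into one div/mod. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t q = iter_div_sketch(10000000000ULL, 4000000000U, &rem);

	/* 10e9 / 4e9: expect quotient 2, remainder 2e9 */
	printf("q=%u rem=%llu\n", q, (unsigned long long)rem);
	return 0;
}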