math64.h

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
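
/*
 * Example (illustrative, not part of this header; names hypothetical):
 * split a byte count into whole 512-byte sectors plus a residual byte
 * offset:
 *
 *        u32 offset;
 *        u64 sectors = div_u64_rem(nr_bytes, 512, &offset);
 */
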
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
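
/*
 * Example (illustrative): C division truncates toward zero, so the
 * remainder takes the sign of the dividend:
 *
 *        s32 rem;
 *        s64 q = div_s64_rem(-7, 2, &rem);
 *
 * yields q == -3 and rem == -1.
 */
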
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}
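
/*
 * Example (illustrative; names hypothetical): when the divisor itself
 * may not fit in 32 bits, the 32bit-divisor helpers below do not apply
 * and the full 64bit-divisor variant is needed, e.g. averaging one u64
 * counter over another:
 *
 *        u64 avg = div64_u64(total_bytes, nr_events);
 */
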
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        /* do_div() divides @dividend in place and returns the remainder */
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif
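
/*
 * Note: do_div() from <asm/div64.h> is a macro that divides its first
 * argument in place (the u64 lvalue is overwritten with the quotient)
 * and evaluates to the 32bit remainder. Illustrative use, assuming a
 * hypothetical nanosecond value ns:
 *
 *        u32 rem = do_div(ns, 1000);
 *
 * afterwards ns holds the quotient and rem the remainder.
 */
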
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif
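
/*
 * Example (illustrative): converting nanoseconds to microseconds; the
 * divisor fits in 32 bits, so this preferred variant applies (assuming
 * the usual NSEC_PER_USEC constant from <linux/time.h>):
 *
 *        u64 us = div_u64(ns, NSEC_PER_USEC);
 */
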
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}
#endif
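
/*
 * Example (illustrative; names hypothetical): averaging a signed error
 * accumulator over a sample count:
 *
 *        s64 avg_err = div_s64(err_sum, nr_samples);
 */
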
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* The following asm() prevents the compiler from
                   optimising this loop into a modulo operation. */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}
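
/*
 * Example (illustrative; names hypothetical): the subtraction loop above
 * only beats a real divide when the quotient is expected to be small,
 * e.g. carving whole seconds out of a short nanosecond delta (assuming
 * the usual NSEC_PER_SEC constant):
 *
 *        u64 ns_rem;
 *        u32 secs = __iter_div_u64_rem(delta_ns, NSEC_PER_SEC, &ns_rem);
 */
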
#endif /* _LINUX_MATH64_H */