math64.h

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x,y) div64_s64((x),(y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
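/*
 * Illustrative sketch, not part of the kernel header: splitting a
 * nanosecond count into whole seconds plus leftover nanoseconds with
 * div_u64_rem(). The divisor 10^9 fits in a u32, so the 32bit-divisor
 * variant applies. The helper name is hypothetical.
 */
static inline u64 example_ns_to_sec(u64 ns, u32 *rem_ns)
{
	return div_u64_rem(ns, 1000000000, rem_ns);
}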
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor with remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
#elif BITS_PER_LONG == 32

#define div64_long(x,y) div_s64((x),(y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	u64 remainder;
	return div64_u64_rem(dividend, divisor, &remainder);
}
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
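/*
 * Illustrative sketch, not part of the kernel header: per the comment
 * above, prefer div_u64() whenever the divisor fits in 32 bits, e.g. a
 * throughput in bytes per second computed over a millisecond interval.
 * The helper name is hypothetical; overflow of the scaled byte count is
 * ignored here.
 */
static inline u64 example_bytes_per_sec(u64 bytes, u32 interval_ms)
{
	/* Scale first, then use the cheaper 64bit/32bit divide. */
	return div_u64(bytes * 1000, interval_ms);
}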
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif
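/*
 * Illustrative sketch, not part of the kernel header: like native C
 * division, the signed helpers truncate toward zero, so dividing -7 by 2
 * yields -3 with a remainder of -1. The helper name is hypothetical.
 */
static inline s64 example_signed_halve(s64 value, s32 *rem)
{
	return div_s64_rem(value, 2, rem);
}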
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
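/*
 * Illustrative sketch, not part of the kernel header: the subtraction
 * loop above only pays off when the quotient is expected to be very
 * small (typically 0 or 1), e.g. when normalizing a seconds/nanoseconds
 * pair after adding a short delay. The struct and helper names are
 * hypothetical.
 */
struct example_ts {
	u64 sec;
	u64 nsec;	/* kept below 10^9 */
};

static inline void example_ts_add_ns(struct example_ts *ts, u64 ns)
{
	ts->sec += __iter_div_u64_rem(ts->nsec + ns, 1000000000, &ts->nsec);
}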
#endif /* _LINUX_MATH64_H */