/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n >> 32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
 */
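/*
 * Usage sketch (illustrative, not part of the original file): do_div()
 * divides a 64-bit variable in place by a 32-bit value and evaluates to
 * the 32-bit remainder.  The variable names below are hypothetical.
 *
 *	uint64_t ns = 1000000123;
 *	uint32_t rem = do_div(ns, 1000000000);
 *
 * leaves ns == 1 and rem == 123.  When the dividend fits in 32 bits,
 * do_div() performs a plain 32-bit division inline; on 32-bit
 * architectures with a larger dividend it falls back to __div64_32()
 * below.
 */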
#include <linux/types.h>
#include <linux/module.h>
#include <linux/bitops.h>	/* fls(), used by div64_64() below */
#include <asm/div64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/*
	 * Reduce the dividend first: if its high 32 bits are at least as
	 * large as the divisor, divide them separately.  That part of the
	 * quotient lands in the upper half of the result, and the matching
	 * amount is subtracted from the remainder.
	 */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/*
	 * Classic shift-and-subtract division on what is left: double the
	 * divisor (and the matching quotient bit) until it reaches the
	 * remainder or would overflow, then walk it back down, subtracting
	 * wherever it fits.
	 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
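/*
 * Worked example (illustrative, not from the original file): for
 * *n == 0x500000003 and base == 2, the reduction step divides the high
 * word (5 / 2 == 2), contributing 0x200000000 to the result and leaving
 * rem == 0x100000003; the shift-and-subtract loop then supplies the low
 * bits, so the function stores 0x280000001 in *n and returns 1.
 */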
/*
 * 64bit divisor, dividend and result, with dynamic precision: a divisor
 * wider than 32 bits has both operands shifted right by the number of
 * significant bits in its high word, so the divisor fits in 32 bits and
 * do_div() can finish the job.
 */
uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high, d;

	high = divisor >> 32;
	if (high) {
		unsigned int shift = fls(high);

		d = divisor >> shift;
		dividend >>= shift;
	} else
		d = divisor;

	do_div(dividend, d);

	return dividend;
}
EXPORT_SYMBOL(div64_64);
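/*
 * Precision sketch (illustrative, not part of the original file): when
 * the divisor is wider than 32 bits its low bits are discarded by the
 * shift, so the quotient is only approximate.  For example
 *
 *	div64_64(1ULL << 36, (1ULL << 32) + 1)
 *
 * shifts both operands right by one, computes 0x800000000 / 0x80000000
 * and returns 16, while the exact quotient is 15.
 */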
#endif /* BITS_PER_LONG == 32 */