delay.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf Electronics
 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H

#include <linux/param.h>
#include <linux/smp.h>

#include <asm/compiler.h>
#include <asm/war.h>
/*
 * __delay - busy-wait for @loops iterations of a two-instruction loop.
 * @loops: number of loop iterations to spin for.
 *
 * The decrement sits in the branch delay slot (".set noreorder"), so each
 * iteration costs exactly one bnez plus one subu/dsubu.  ".align 3" aligns
 * the loop head to an 8-byte boundary.  The word size is selected at
 * compile time via sizeof(long), so only one asm survives in the object.
 *
 * The third variant exists for CPUs flagged by DADDI_WAR (see asm/war.h):
 * it subtracts a register operand ("r" (1)) instead of an immediate,
 * presumably to keep the assembler from emitting the errata-affected
 * daddiu immediate form -- confirm against asm/war.h.
 */
static inline void __delay(unsigned long loops)
{
	if (sizeof(long) == 4)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	subu	%0, 1					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops));
	else if (sizeof(long) == 8 && !DADDI_WAR)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	dsubu	%0, 1					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops));
	else if (sizeof(long) == 8 && DADDI_WAR)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	dsubu	%0, %2					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops), "r" (1));
}
/*
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant)
 */
/*
 * __udelay - delay for @usecs microseconds on a CPU calibrated to @lpj.
 * @usecs: microseconds to spin for (must stay small; see MAX_UDELAY_MS).
 * @lpj:   the CPU's loops-per-jiffy calibration value.
 *
 * First scales usecs by a precomputed reciprocal of (usec per jiffy),
 * then takes the high half of usecs * lpj as the iteration count for
 * __delay().  Taking only HI of the widening multiply is what performs
 * the implicit >> wordsize divide.
 *
 * The "=h"/"=l" constraints bind the MIPS HI/LO result registers
 * directly; the unused halves (lo, and hi in the non-WAR cases) are
 * dummy outputs, and GCC_REG_ACCUM (asm/compiler.h) is clobbered.
 * NOTE(review): the "h" asm constraint was dropped by newer GCC for
 * MIPS -- confirm the supported toolchain range.
 *
 * The R4000_WAR variant adds an explicit mfhi after dmultu,
 * presumably as an errata workaround -- confirm against asm/war.h.
 */
static inline void __udelay(unsigned long usecs, unsigned long lpj)
{
	unsigned long hi, lo;

	/*
	 * The rates of 128 is rounded wrongly by the catchall case
	 * for 64-bit.  Excessive precission?  Probably ...
	 */
#if defined(CONFIG_64BIT) && (HZ == 128)
	usecs *= 0x0008637bd05af6c7UL;		/* 2**64 / (1000000 / HZ) */
#elif defined(CONFIG_64BIT)
	usecs *= (0x8000000000000000UL / (500000 / HZ));
#else /* 32-bit junk follows here */
	usecs *= (unsigned long) (((0x8000000000000000ULL / (500000 / HZ)) +
				   0x80000000ULL) >> 32);
#endif

	if (sizeof(long) == 4)
		__asm__("multu\t%2, %3"
		: "=h" (usecs), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);
	else if (sizeof(long) == 8 && !R4000_WAR)
		__asm__("dmultu\t%2, %3"
		: "=h" (usecs), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);
	else if (sizeof(long) == 8 && R4000_WAR)
		__asm__("dmultu\t%3, %4\n\tmfhi\t%0"
		: "=r" (usecs), "=h" (hi), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);

	__delay(usecs);
}
/* Per-CPU loops-per-jiffy calibration value for the current CPU. */
#define __udelay_val cpu_data[raw_smp_processor_id()].udelay_val

/* Spin for @usecs microseconds using the current CPU's calibration. */
#define udelay(usecs) __udelay((usecs), __udelay_val)

/*
 * make sure "usecs *= ..." in udelay do not overflow.
 * Callers that need longer delays should loop in MAX_UDELAY_MS chunks
 * (or use mdelay()).
 */
#if HZ >= 1000
#define MAX_UDELAY_MS 1
#elif HZ <= 200
#define MAX_UDELAY_MS 5
#else
#define MAX_UDELAY_MS (1000 / HZ)
#endif

#endif /* _ASM_DELAY_H */