/*
 * Precise Delay Loops for i386
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors. The additional
 * jump magic is needed to get the timing stable on all the CPUs
 * we have to worry about.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif
  24. /* simple loop based delay: */
  25. static void delay_loop(unsigned long loops)
  26. {
  27. int d0;
  28. __asm__ __volatile__(
  29. "\tjmp 1f\n"
  30. ".align 16\n"
  31. "1:\tjmp 2f\n"
  32. ".align 16\n"
  33. "2:\tdecl %0\n\tjns 2b"
  34. :"=&a" (d0)
  35. :"0" (loops));
  36. }
  37. /* TSC based delay: */
  38. static void delay_tsc(unsigned long loops)
  39. {
  40. unsigned long bclock, now;
  41. preempt_disable(); /* TSC's are per-cpu */
  42. rdtscl(bclock);
  43. do {
  44. rep_nop();
  45. rdtscl(now);
  46. } while ((now-bclock) < loops);
  47. preempt_enable();
  48. }
  49. /*
  50. * Since we calibrate only once at boot, this
  51. * function should be set once at boot and not changed
  52. */
  53. static void (*delay_fn)(unsigned long) = delay_loop;
  54. void use_tsc_delay(void)
  55. {
  56. delay_fn = delay_tsc;
  57. }
  58. int __devinit read_current_timer(unsigned long *timer_val)
  59. {
  60. if (delay_fn == delay_tsc) {
  61. rdtscl(*timer_val);
  62. return 0;
  63. }
  64. return -1;
  65. }
  66. void __delay(unsigned long loops)
  67. {
  68. delay_fn(loops);
  69. }
  70. inline void __const_udelay(unsigned long xloops)
  71. {
  72. int d0;
  73. xloops *= 4;
  74. __asm__("mull %0"
  75. :"=d" (xloops), "=&a" (d0)
  76. :"1" (xloops), "0"
  77. (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
  78. __delay(++xloops);
  79. }
  80. void __udelay(unsigned long usecs)
  81. {
  82. __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
  83. }
  84. void __ndelay(unsigned long nsecs)
  85. {
  86. __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
  87. }
  88. EXPORT_SYMBOL(__delay);
  89. EXPORT_SYMBOL(__const_udelay);
  90. EXPORT_SYMBOL(__udelay);
  91. EXPORT_SYMBOL(__ndelay);