time_64.c

/*
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002,2006  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/mca.h>

#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
#include <asm/i8253.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>
#include <asm/nmi.h>

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
int timer_ack;
#endif

#ifdef CONFIG_X86_64
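/*
 * Note: __section_jiffies (see asm/vsyscall.h) places this copy of
 * jiffies in the vsyscall-mapped area so userspace vsyscalls can read
 * it without entering the kernel.
 */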
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
#endif

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/*
	 * Assume the lock function has either no stack frame or a copy
	 * of the flags from PUSHF above the return address. EFLAGS always
	 * has bits 22 and up cleared, unlike kernel addresses.
	 */
	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;

		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * Default timer interrupt handler for PIT/HPET
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	/* Keep the NMI watchdog up to date */
	inc_irq_stat(irq0_irqs);

	/* Optimized out for !IO_APIC and x86_64 */
	if (timer_ack) {
		/*
		 * Subtle, when I/O APICs are used we have to ack timer IRQ
		 * manually to deassert NMI lines for the watchdog if run
		 * on an 82489DX-based system.
		 */
		spin_lock(&i8259A_lock);
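		/*
		 * 0x0c is an OCW3 poll command; the following read of the
		 * poll register acknowledges the highest-priority pending
		 * IRQ on the master PIC.
		 */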
		outb(0x0c, PIC_MASTER_OCW3);
		/* Ack the IRQ; AEOI will end it automatically. */
		inb(PIC_MASTER_POLL);
		spin_unlock(&i8259A_lock);
	}

	global_clock_event->event_handler(global_clock_event);

	/* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
	if (MCA_bus)
		outb_p(inb_p(0x61) | 0x80, 0x61);

	return IRQ_HANDLED;
}

/*
 * calibrate_cpu is used on systems with fixed-rate TSCs to determine
 * the processor frequency.
 */
#define TICK_COUNT 100000000
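/* Length of the calibration window below, in TSC ticks (~100 million) */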
unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
		     "cpu_khz value may be incorrect.\n");
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);

	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
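	/*
	 * EVNTSEL: bit 22 enables the counter, bits 17:16 count in both
	 * OS and user mode, event 0x76 is "CPU clocks not halted" on
	 * AMD K7/K8.
	 */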
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);
	local_irq_restore(flags);

	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
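
	/*
	 * pmc_now core cycles were counted over (tsc_now - tsc_start)
	 * TSC ticks, so scale by the known TSC rate to get cpu_khz.
	 */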
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
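
/*
 * IRQF_NOBALANCING excludes irq0 from IRQ balancing and IRQF_TIMER
 * marks it as a timer interrupt to the IRQ core.
 */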
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
	.name = "timer"
};

void __init setup_default_timer_irq(void)
{
	irq0.mask = cpumask_of_cpu(0);
	setup_irq(0, &irq0);
}

/* Default timer init function */
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();
	setup_default_timer_irq();
}

static void x86_late_time_init(void)
{
	x86_init.timers.timer_init();
}

/*
 * Initialize the TSC here and defer the periodic timer setup to
 * x86_late_time_init(), via late_time_init, so that ioremap works.
 */
void __init time_init(void)
{
	tsc_init();
	late_time_init = x86_late_time_init;
}