time_64.c

/*
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002,2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>

#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/nmi.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>
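
/*
 * __section_jiffies places this copy of jiffies in the vsyscall page,
 * where user space can read it without entering the kernel.
 */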
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
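
/*
 * profile_pc() decides which instruction pointer a profiling tick is
 * charged to.  If the sample landed inside one of the kernel's lock
 * functions, report the caller instead, so that lock overhead shows
 * up against the code that took the lock.
 */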
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a
	   copy of flags from PUSHF on it.
	   Eflags always has bits 22 and up cleared, unlike kernel
	   addresses. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
		unsigned long *sp = (unsigned long *)regs->sp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
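
/*
 * Default timer interrupt handler for IRQ0: count the tick in the
 * per-CPU PDA, then forward it to whichever clock event device is
 * installed as the global one (HPET or PIT).
 */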
static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
	add_pda(irq0_irqs, 1);

	global_clock_event->event_handler(global_clock_event);

	return IRQ_HANDLED;
}

/*
 * calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency: a performance counter is programmed to count
 * real core cycles, runs for TICK_COUNT TSC ticks, and the known TSC
 * rate (tsc_khz) is then scaled by the ratio of the two counts.
 */
#define TICK_COUNT 100000000

unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	/* Find a free K7-style performance counter. */
	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		/* None free: borrow counter 3 and save its state. */
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	/* Event 0x76 counts CPU clocks not halted; bits 17:16 count in
	   both kernel and user mode; bit 22 enables the counter. */
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	/* core cycles per TSC tick, scaled by the TSC rate -> CPU kHz */
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
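
/*
 * The IRQ0 irqaction for the tick: handled with interrupts disabled
 * (IRQF_DISABLED), flagged as a timer interrupt for the irqpoll
 * logic (IRQF_IRQPOLL), and excluded from IRQ balancing
 * (IRQF_NOBALANCING).
 */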
static struct irqaction irq0 = {
	.handler	= timer_event_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};
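
/*
 * Pick the clock event source for the tick: try the HPET first and
 * fall back to the legacy i8253 PIT if it cannot be enabled, then
 * install the IRQ0 handler.
 */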
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();

	setup_irq(0, &irq0);
}
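
/*
 * Early time init: calibrate the TSC and select how the vsyscall
 * vgetcpu() discovers the current CPU (RDTSCP where the CPU supports
 * it, the LSL segment-limit trick otherwise).  The actual timer
 * hardware is set up later via late_time_init.
 */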
void __init time_init(void)
{
	tsc_init();

	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	late_time_init = choose_time_init();
}