time_64.c

/*
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002,2006  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>

#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/nmi.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>
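
/*
 * Userspace-visible copy of jiffies: __section_jiffies places this in
 * its own section so it can be exported through the vsyscall page,
 * letting userspace read the tick count without entering the kernel.
 */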
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
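
/*
 * profile_pc - pick the PC to credit with a profiling tick.
 *
 * If the tick interrupted a spinlock function, credit the caller of the
 * lock function instead, so profiles don't pile up on the lock code
 * itself.  The caller's return address is recovered from the frame
 * pointer when available, otherwise by scanning the top of the stack:
 * kernel text addresses have bits above 22 set, whereas a saved EFLAGS
 * word does not, so the >> 22 test tells them apart.
 */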
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of eflags from PUSHF.
	   Eflags always has bits 22 and up cleared, unlike kernel
	   addresses. */
	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;

		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
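
/*
 * The timer interrupt (IRQ0): count it in the per-CPU PDA and hand the
 * tick to whichever clock event device is currently global (the HPET
 * when available, otherwise the PIT; see hpet_time_init() below).
 */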
static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
	add_pda(irq0_irqs, 1);
	global_clock_event->event_handler(global_clock_event);
	return IRQ_HANDLED;
}

/*
 * calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency.
 */
#define TICK_COUNT 100000000
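
/*
 * The approach, in outline: grab a free K7/K8 performance counter (or,
 * if none is free, temporarily borrow counter 3 after saving its state),
 * program it to count event 0x76 ("CPU clocks not halted") in both user
 * and kernel mode (3 << 16) with the enable bit set (1 << 22), spin
 * until the TSC advances by TICK_COUNT, then scale cycles counted per
 * TSC tick by tsc_khz to get the core frequency in kHz.
 */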
unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);
	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}

static struct irqaction irq0 = {
	.handler	= timer_event_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};
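
/*
 * Set up the tick source: try the HPET first and fall back to the
 * legacy i8253 PIT if it cannot be enabled, then register the timer
 * handler on IRQ 0, pinned to CPU 0.
 */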
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();

	irq0.mask = cpumask_of_cpu(0);
	setup_irq(0, &irq0);
}
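
/*
 * Early time init: calibrate the TSC and select how vsyscall getcpu()
 * finds the current CPU (RDTSCP when the hardware has it, otherwise the
 * LSL-based fallback).  The actual timer setup runs later, from
 * late_time_init.
 */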
void __init time_init(void)
{
	tsc_init();

	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	late_time_init = choose_time_init();
}