@@ -82,7 +82,8 @@ extern unsigned long wall_jiffies;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-struct timer_opts *cur_timer __read_mostly = &timer_none;
+/* XXX - necessary to keep things compiling. to be removed later */
+u32 pmtmr_ioport;
 
 /*
  * This is a special lock that is owned by the CPU and holds the index
@@ -113,99 +114,19 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
 }
 EXPORT_SYMBOL(rtc_cmos_write);
 
-/*
- * This version of gettimeofday has microsecond resolution
- * and better than microsecond precision on fast x86 machines with TSC.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick;
-
-	do {
-		unsigned long lost;
-
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = cur_timer->get_offset();
-		lost = jiffies - wall_jiffies;
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0)) {
-			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-			usec = min(usec, max_ntp_tick);
-
-			if (lost)
-				usec += lost * max_ntp_tick;
-		}
-		else if (unlikely(lost))
-			usec += lost * (USEC_PER_SEC / HZ);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
-	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
 static int set_rtc_mmss(unsigned long nowtime)
 {
 	int retval;
-
-	WARN_ON(irqs_disabled());
+	unsigned long flags;
 
 	/* gets recalled with irq locally disabled */
-	spin_lock_irq(&rtc_lock);
+	/* XXX - does irqsave resolve this? -johnstul */
+	spin_lock_irqsave(&rtc_lock, flags);
 	if (efi_enabled)
 		retval = efi_set_rtc_mmss(nowtime);
 	else
 		retval = mach_set_rtc_mmss(nowtime);
-	spin_unlock_irq(&rtc_lock);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
 }
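The set_rtc_mmss() hunk above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave/irqrestore variants (and drops the WARN_ON), so the function no longer assumes anything about its caller's interrupt state: irqsave records the current IRQ flags and irqrestore puts them back exactly as found, whereas the plain _irq variants re-enable interrupts unconditionally on unlock. A minimal kernel-style sketch of that difference, with a hypothetical lock and helpers that are not part of this patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sample_lock);	/* hypothetical lock, for illustration only */
static int sample_shared;

/* Safe from any context: the interrupt state is saved on entry and
 * restored on exit, whatever it was. */
static void sample_set_irqsave(int val)
{
	unsigned long flags;

	spin_lock_irqsave(&sample_lock, flags);
	sample_shared = val;
	spin_unlock_irqrestore(&sample_lock, flags);
}

/* Only correct when the caller is known to have interrupts enabled,
 * because spin_unlock_irq() re-enables them unconditionally. */
static void sample_set_irq(int val)
{
	spin_lock_irq(&sample_lock);
	sample_shared = val;
	spin_unlock_irq(&sample_lock);
}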
@@ -213,16 +134,6 @@ static int set_rtc_mmss(unsigned long nowtime)
 
 int timer_ack;
 
-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
- * Note: This function is required to return accurate
- * time even in the absence of multiple timer ticks.
- */
-unsigned long long monotonic_clock(void)
-{
-	return cur_timer->monotonic_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
 #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -237,11 +148,21 @@ EXPORT_SYMBOL(profile_pc);
 #endif
 
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
  */
-static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
+	/*
+	 * Here we are in the timer irq handler. We just have irqs locally
+	 * disabled but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
+	 * the irq version of write_lock because as just said we have irq
+	 * locally disabled. -arca
+	 */
+	write_seqlock(&xtime_lock);
+
 #ifdef CONFIG_X86_IO_APIC
 	if (timer_ack) {
 		/*
@@ -274,27 +195,6 @@ static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
 		irq = inb_p( 0x61 );	/* read the current state */
 		outb_p( irq|0x80, 0x61 );	/* reset the IRQ */
 	}
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
-
-	cur_timer->mark_offset();
-
-	do_timer_interrupt(irq, regs);
 
 	write_sequnlock(&xtime_lock);
 
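With do_timer_interrupt() folded into it, timer_interrupt() now takes the xtime_lock write side itself; the plain write_seqlock() (no _irq/_irqsave variant) suffices because interrupts are already disabled in the handler, as the moved -arca comment explains. Readers such as the removed do_gettimeofday() pair this with a read_seqbegin()/read_seqretry() retry loop. A minimal sketch of that writer/reader pairing, using a hypothetical seqlock and value rather than the patch's own code:

#include <linux/seqlock.h>

static seqlock_t sample_lock = SEQLOCK_UNLOCKED;	/* hypothetical, for illustration only */
static unsigned long sample_value;

/* Writer side: assumed to run with interrupts already disabled
 * (e.g. from an interrupt handler), so no _irq variant is needed. */
static void sample_update(unsigned long v)
{
	write_seqlock(&sample_lock);
	sample_value = v;
	write_sequnlock(&sample_lock);
}

/* Reader side: retry until the sequence count is unchanged across
 * the read, i.e. no writer ran in the middle. */
static unsigned long sample_read(void)
{
	unsigned long seq, v;

	do {
		seq = read_seqbegin(&sample_lock);
		v = sample_value;
	} while (read_seqretry(&sample_lock, seq));

	return v;
}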
@@ -375,7 +275,6 @@ void notify_arch_cmos_timer(void)
 
 static long clock_cmos_diff, sleep_start;
 
-static struct timer_opts *last_timer;
 static int timer_suspend(struct sys_device *dev, pm_message_t state)
 {
 	/*
@@ -384,10 +283,6 @@ static int timer_suspend(struct sys_device *dev, pm_message_t state)
 	clock_cmos_diff = -get_cmos_time();
 	clock_cmos_diff += get_seconds();
 	sleep_start = get_cmos_time();
-	last_timer = cur_timer;
-	cur_timer = &timer_none;
-	if (last_timer->suspend)
-		last_timer->suspend(state);
 	return 0;
 }
 
@@ -410,10 +305,6 @@ static int timer_resume(struct sys_device *dev)
 	jiffies_64 += sleep_length;
 	wall_jiffies += sleep_length;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
-	if (last_timer->resume)
-		last_timer->resume();
-	cur_timer = last_timer;
-	last_timer = NULL;
 	touch_softlockup_watchdog();
 	return 0;
 }
@@ -455,9 +346,6 @@ static void __init hpet_time_init(void)
 		printk("Using HPET for base-timer\n");
 	}
 
-	cur_timer = select_timer();
-	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
 	time_init_hook();
 }
 #endif
@@ -479,8 +367,5 @@ void __init time_init(void)
 	set_normalized_timespec(&wall_to_monotonic,
 		-xtime.tv_sec, -xtime.tv_nsec);
 
-	cur_timer = select_timer();
-	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
 	time_init_hook();
 }
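time_init() keeps initializing wall_to_monotonic as the negation of xtime, and set_normalized_timespec() folds the resulting negative nanosecond value back into the [0, NSEC_PER_SEC) range. A small user-space sketch of that normalization step (an illustrative re-creation, not the kernel's code):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Fold nsec into [0, NSEC_PER_SEC), adjusting sec to compensate;
 * this mirrors what set_normalized_timespec() does. */
static void normalize(struct timespec *ts, long long sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		sec++;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		sec--;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

int main(void)
{
	/* Pretend the RTC said 1000.2s at boot (made-up value). */
	struct timespec xtime = { .tv_sec = 1000, .tv_nsec = 200000000 };
	struct timespec wall_to_monotonic;

	/* Same call shape as time_init(): negate both fields of xtime. */
	normalize(&wall_to_monotonic, -(long long)xtime.tv_sec, -xtime.tv_nsec);

	/* Prints "-1001 800000000": adding this offset to xtime yields zero,
	 * so monotonic time starts at zero at boot. */
	printf("%lld %ld\n", (long long)wall_to_monotonic.tv_sec,
	       wall_to_monotonic.tv_nsec);
	return 0;
}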