@@ -1349,194 +1349,6 @@ void __init init_timers(void)
         open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
 }
 
-#ifdef CONFIG_TIME_INTERPOLATION
-
-struct time_interpolator *time_interpolator __read_mostly;
-static struct time_interpolator *time_interpolator_list __read_mostly;
-static DEFINE_SPINLOCK(time_interpolator_lock);
-
-static inline cycles_t time_interpolator_get_cycles(unsigned int src)
-{
-        unsigned long (*x)(void);
-
-        switch (src)
-        {
-                case TIME_SOURCE_FUNCTION:
-                        x = time_interpolator->addr;
-                        return x();
-
-                case TIME_SOURCE_MMIO64 :
-                        return readq_relaxed((void __iomem *)time_interpolator->addr);
-
-                case TIME_SOURCE_MMIO32 :
-                        return readl_relaxed((void __iomem *)time_interpolator->addr);
-
-                default: return get_cycles();
-        }
-}
-
-static inline u64 time_interpolator_get_counter(int writelock)
-{
-        unsigned int src = time_interpolator->source;
-
-        if (time_interpolator->jitter)
-        {
-                cycles_t lcycle;
-                cycles_t now;
-
-                do {
-                        lcycle = time_interpolator->last_cycle;
-                        now = time_interpolator_get_cycles(src);
-                        if (lcycle && time_after(lcycle, now))
-                                return lcycle;
-
-                        /* When holding the xtime write lock, there's no need
-                         * to add the overhead of the cmpxchg. Readers are
-                         * forced to retry until the write lock is released.
-                         */
-                        if (writelock) {
-                                time_interpolator->last_cycle = now;
-                                return now;
-                        }
-                        /* Keep track of the last timer value returned. The use of cmpxchg here
-                         * will cause contention in an SMP environment.
-                         */
-                } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
-                return now;
-        }
-        else
-                return time_interpolator_get_cycles(src);
-}
-
-void time_interpolator_reset(void)
-{
-        time_interpolator->offset = 0;
-        time_interpolator->last_counter = time_interpolator_get_counter(1);
-}
-
-#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
-
-unsigned long time_interpolator_get_offset(void)
-{
-        /* If we do not have a time interpolator set up then just return zero */
-        if (!time_interpolator)
-                return 0;
-
-        return time_interpolator->offset +
-                GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
-}
-
-#define INTERPOLATOR_ADJUST 65536
-#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
-
-void time_interpolator_update(long delta_nsec)
-{
-        u64 counter;
-        unsigned long offset;
-
-        /* If there is no time interpolator set up then do nothing */
-        if (!time_interpolator)
-                return;
-
-        /*
-         * The interpolator compensates for late ticks by accumulating the late
-         * time in time_interpolator->offset. A tick earlier than expected will
-         * lead to a reset of the offset and a corresponding jump of the clock
-         * forward. Again this only works if the interpolator clock is running
-         * slightly slower than the regular clock and the tuning logic ensures
-         * that.
-         */
-
-        counter = time_interpolator_get_counter(1);
-        offset = time_interpolator->offset +
-                GET_TI_NSECS(counter, time_interpolator);
-
-        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
-                time_interpolator->offset = offset - delta_nsec;
-        else {
-                time_interpolator->skips++;
-                time_interpolator->ns_skipped += delta_nsec - offset;
-                time_interpolator->offset = 0;
-        }
-        time_interpolator->last_counter = counter;
-
-        /* Tuning logic for time interpolator invoked every minute or so.
-         * Decrease interpolator clock speed if no skips occurred and an offset is carried.
-         * Increase interpolator clock speed if we skip too much time.
-         */
-        if (jiffies % INTERPOLATOR_ADJUST == 0)
-        {
-                if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
-                        time_interpolator->nsec_per_cyc--;
-                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
-                        time_interpolator->nsec_per_cyc++;
-                time_interpolator->skips = 0;
-                time_interpolator->ns_skipped = 0;
-        }
-}
-
-static inline int
-is_better_time_interpolator(struct time_interpolator *new)
-{
-        if (!time_interpolator)
-                return 1;
-        return new->frequency > 2*time_interpolator->frequency ||
-            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
-}
-
-void
-register_time_interpolator(struct time_interpolator *ti)
-{
-        unsigned long flags;
-
-        /* Sanity check */
-        BUG_ON(ti->frequency == 0 || ti->mask == 0);
-
-        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
-        spin_lock(&time_interpolator_lock);
-        write_seqlock_irqsave(&xtime_lock, flags);
-        if (is_better_time_interpolator(ti)) {
-                time_interpolator = ti;
-                time_interpolator_reset();
-        }
-        write_sequnlock_irqrestore(&xtime_lock, flags);
-
-        ti->next = time_interpolator_list;
-        time_interpolator_list = ti;
-        spin_unlock(&time_interpolator_lock);
-}
-
-void
-unregister_time_interpolator(struct time_interpolator *ti)
-{
-        struct time_interpolator *curr, **prev;
-        unsigned long flags;
-
-        spin_lock(&time_interpolator_lock);
-        prev = &time_interpolator_list;
-        for (curr = *prev; curr; curr = curr->next) {
-                if (curr == ti) {
-                        *prev = curr->next;
-                        break;
-                }
-                prev = &curr->next;
-        }
-
-        write_seqlock_irqsave(&xtime_lock, flags);
-        if (ti == time_interpolator) {
-                /* we lost the best time-interpolator: */
-                time_interpolator = NULL;
-                /* find the next-best interpolator */
-                for (curr = time_interpolator_list; curr; curr = curr->next)
-                        if (is_better_time_interpolator(curr))
-                                time_interpolator = curr;
-                time_interpolator_reset();
-        }
-        write_sequnlock_irqrestore(&xtime_lock, flags);
-        spin_unlock(&time_interpolator_lock);
-}
-#endif /* CONFIG_TIME_INTERPOLATION */
-
 /**
  * msleep - sleep safely even with waitqueue interruptions
  * @msecs: Time in milliseconds to sleep for
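
For readers studying the removed code: the cmpxchg() loop in time_interpolator_get_counter() is a lock-free pattern for keeping a shared "last value read" monotonically non-decreasing while several CPUs sample a jittery counter concurrently. Below is a minimal user-space sketch of the same idea, with GCC/Clang __atomic builtins standing in for the kernel's cmpxchg() and a hypothetical read_raw_counter() standing in for the clock source; it is an illustration of the pattern, not the kernel code itself.

#include <stdint.h>
#include <time.h>

/* Stand-in for a jittery hardware counter (illustrative only; the
 * kernel read an MMIO register or a CPU cycle counter here). */
static uint64_t read_raw_counter(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static uint64_t last_cycle;     /* shared by all readers */

/* Never returns a value smaller than one already published.  Mirrors
 * the removed kernel loop; note the kernel used time_after() so that
 * counter wraparound was handled, which the plain '>' below does not
 * attempt. */
static uint64_t monotonic_counter_read(void)
{
        uint64_t old, now;

        do {
                old = __atomic_load_n(&last_cycle, __ATOMIC_RELAXED);
                now = read_raw_counter();
                if (old && old > now)   /* raw read went backwards: reuse newer value */
                        return old;
        } while (!__atomic_compare_exchange_n(&last_cycle, &old, now, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return now;
}

int main(void)
{
        uint64_t a = monotonic_counter_read();
        uint64_t b = monotonic_counter_read();
        return b >= a ? 0 : 1;  /* the second read can never be smaller */
}

The writelock shortcut in the kernel version exists because a writer already holding xtime_lock excludes all seqlock readers, so it can store last_cycle with a plain assignment and skip the compare-and-swap entirely.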
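Likewise, GET_TI_NSECS() together with the nsec_per_cyc setup in register_time_interpolator() is ordinary fixed-point scaling: the expensive division (NSEC_PER_SEC << shift) / frequency happens once at registration, so converting a cycle delta to nanoseconds afterwards costs only a multiply and a shift. A standalone illustration with assumed example values (a 19.2 MHz clock and a shift of 16; neither number comes from the patch):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t frequency = 19200000;  /* assumed 19.2 MHz clock */
        unsigned shift = 16;            /* assumed fixed-point fraction bits */

        /* One-time setup, as register_time_interpolator() did. */
        uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency;

        /* Per-read conversion, as GET_TI_NSECS() did:
         * (delta * nsec_per_cyc) >> shift. */
        uint64_t delta_cycles = 19200;  /* 1 ms worth of cycles */
        uint64_t ns = (delta_cycles * nsec_per_cyc) >> shift;

        printf("%llu cycles -> %llu ns (expect ~1000000)\n",
               (unsigned long long)delta_cycles, (unsigned long long)ns);
        return 0;
}

This also explains the tuning branch in time_interpolator_update(): nudging nsec_per_cyc up or down by one unit slews the interpolated clock by the smallest representable rate step, which is what lets it converge on a clock slightly slower than the tick-driven time base.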