@@ -64,17 +64,20 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 	.clock_base =
 	{
 		{
-			.index = CLOCK_REALTIME,
-			.get_time = &ktime_get_real,
+			.index = HRTIMER_BASE_MONOTONIC,
+			.clockid = CLOCK_MONOTONIC,
+			.get_time = &ktime_get,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_MONOTONIC,
-			.get_time = &ktime_get,
+			.index = HRTIMER_BASE_REALTIME,
+			.clockid = CLOCK_REALTIME,
+			.get_time = &ktime_get_real,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_BOOTTIME,
+			.index = HRTIMER_BASE_BOOTTIME,
+			.clockid = CLOCK_BOOTTIME,
 			.get_time = &ktime_get_boottime,
 			.resolution = KTIME_LOW_RES,
 		},
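
With this hunk, .index stops carrying a POSIX clock id and instead names the base's own slot in the clock_base[] array, while the new .clockid field records which clock the base serves. The clockid-to-base mapping then reduces to a table lookup; the helper is defined elsewhere in hrtimer.c and is not part of this diff, but a plausible sketch (enum values and table name assumed from the array order above) looks like:

    #include <time.h>  /* CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, clockid_t */

    /* Assumed to match the clock_base[] order above. */
    enum hrtimer_base_type {
        HRTIMER_BASE_MONOTONIC,
        HRTIMER_BASE_REALTIME,
        HRTIMER_BASE_BOOTTIME,
        HRTIMER_MAX_CLOCK_BASES,
    };

    static const int hrtimer_clock_to_base_table[] = {
        [CLOCK_REALTIME]  = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
        [CLOCK_BOOTTIME]  = HRTIMER_BASE_BOOTTIME,
    };

    static inline int hrtimer_clockid_to_base(clockid_t clock_id)
    {
        return hrtimer_clock_to_base_table[clock_id];
    }

The next hunk drops a call to exactly this kind of helper: base->index now already is the base number, so converting it again would be redundant.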
@@ -196,7 +199,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 	struct hrtimer_cpu_base *new_cpu_base;
 	int this_cpu = smp_processor_id();
 	int cpu = hrtimer_get_target(this_cpu, pinned);
-	int basenum = hrtimer_clockid_to_base(base->index);
+	int basenum = base->index;
 
 again:
 	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
@@ -621,66 +624,6 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	return res;
 }
 
-
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-	struct hrtimer_cpu_base *base;
-	struct timespec realtime_offset, wtm, sleep;
-
-	if (!hrtimer_hres_active())
-		return;
-
-	get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm,
-							&sleep);
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	base = &__get_cpu_var(hrtimer_bases);
-
-	/* Adjust CLOCK_REALTIME offset */
-	raw_spin_lock(&base->lock);
-	base->clock_base[HRTIMER_BASE_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-		timespec_to_ktime(sleep);
-
-	hrtimer_force_reprogram(base, 0);
-	raw_spin_unlock(&base->lock);
-}
-
-/*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
- *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
- */
-void clock_was_set(void)
-{
-	/* Retrigger the CPU local events everywhere */
-	on_each_cpu(retrigger_next_event, NULL, 1);
-}
-
-/*
- * During resume we might have to reprogram the high resolution timer
- * interrupt (on the local CPU):
- */
-void hres_timers_resume(void)
-{
-	WARN_ONCE(!irqs_disabled(),
-		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
-
-	retrigger_next_event(NULL);
-}
-
 /*
  * Initialize the high resolution related parts of cpu_base
  */
@@ -714,12 +657,40 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 
+/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	/* Optimized out for !HIGH_RES */
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
+	raw_spin_lock(&base->lock);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
+}
+
 /*
  * Switch to high resolution mode
  */
 static int hrtimer_switch_to_hres(void)
 {
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 	unsigned long flags;
 
@@ -735,9 +706,8 @@ static int hrtimer_switch_to_hres(void)
 		return 0;
 	}
 	base->hres_active = 1;
-	base->clock_base[HRTIMER_BASE_REALTIME].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_MONOTONIC].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_BOOTTIME].resolution = KTIME_HIGH_RES;
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
 
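
The relocated retrigger_next_event() above rebuilds the per-base offsets after the wall clock changes: CLOCK_REALTIME equals CLOCK_MONOTONIC plus the REALTIME base offset, and the kernel tracks wall_to_monotonic as the opposite difference, so the offset is simply its negation, renormalized so that 0 <= tv_nsec < NSEC_PER_SEC. A standalone userspace illustration of that negate-and-normalize step (normalize() is a stand-in for the kernel's set_normalized_timespec(); the sample value is made up):

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* Stand-in for set_normalized_timespec(): keep 0 <= tv_nsec < NSEC_PER_SEC. */
    static void normalize(struct timespec *ts, long sec, long nsec)
    {
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
    }

    int main(void)
    {
        /* Say monotonic lags realtime by 1234.5s, i.e. wtm = -1234.5s. */
        struct timespec wtm = { .tv_sec = -1235, .tv_nsec = 500000000L };
        struct timespec offset;

        /* realtime_offset = -wtm, the step done in retrigger_next_event() */
        normalize(&offset, -wtm.tv_sec, -wtm.tv_nsec);
        printf("REALTIME offset = %ld.%09lds\n", (long)offset.tv_sec,
               offset.tv_nsec);
        return 0;
    }

Running this prints "REALTIME offset = 1234.500000000s": the two while loops carry the negative nanosecond part into the seconds field, exactly the invariant the kernel helper maintains.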
@@ -761,9 +731,43 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+static inline void retrigger_next_event(void *arg) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
+/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
+ * clock.
+ *
+ * We might have to reprogram the high resolution timer interrupt. On
+ * SMP we call the architecture specific code to retrigger _all_ high
+ * resolution timer interrupts. On UP we just disable interrupts and
+ * call the high resolution interrupt code.
+ */
+void clock_was_set(void)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+	/* Retrigger the CPU local events everywhere */
+	on_each_cpu(retrigger_next_event, NULL, 1);
+#endif
+	timerfd_clock_was_set();
+}
+
+/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hrtimers_resume(void)
+{
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+
+	retrigger_next_event(NULL);
+	timerfd_clock_was_set();
+}
+
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
 #ifdef CONFIG_TIMER_STATS
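
After this hunk, clock_was_set() and hrtimers_resume() are built whether or not CONFIG_HIGH_RES_TIMERS is set, and both now notify timerfd. That works because the !HIGH_RES build supplies an empty inline retrigger_next_event() stub, so the call in hrtimers_resume() needs no #ifdef of its own; only the on_each_cpu() broadcast remains conditional. The stub pattern, reduced to a compilable sketch (names mirror the patch, the surrounding scaffolding is invented):

    #include <stdio.h>

    #define CONFIG_HIGH_RES_TIMERS 1  /* comment out to build the low-res flavor */

    #ifdef CONFIG_HIGH_RES_TIMERS
    static void retrigger_next_event(void *arg)
    {
        (void)arg;
        printf("reprogramming the timer hardware\n");  /* real work lives here */
    }
    #else
    static inline void retrigger_next_event(void *arg) { (void)arg; }  /* compiles away */
    #endif

    void hrtimers_resume_sketch(void)
    {
        retrigger_next_event(NULL);  /* call site needs no #ifdef, as above */
    }

    int main(void)
    {
        hrtimers_resume_sketch();
        return 0;
    }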
@@ -856,6 +860,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 	debug_activate(timer);
 
 	timerqueue_add(&base->active, &timer->node);
+	base->cpu_base->active_bases |= 1 << base->index;
 
 	/*
 	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
@@ -897,6 +902,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 #endif
 	}
 	timerqueue_del(&base->active, &timer->node);
+	if (!timerqueue_getnext(&base->active))
+		base->cpu_base->active_bases &= ~(1 << base->index);
 out:
 	timer->state = newstate;
 }
@@ -1234,7 +1241,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1256,12 +1262,15 @@ retry:
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	base = cpu_base->clock_base;
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		ktime_t basenow;
+		struct hrtimer_clock_base *base;
 		struct timerqueue_node *node;
+		ktime_t basenow;
+
+		if (!(cpu_base->active_bases & (1 << i)))
+			continue;
 
+		base = cpu_base->clock_base + i;
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = timerqueue_getnext(&base->active))) {
@@ -1294,7 +1303,6 @@ retry:
 
 			__run_hrtimer(timer, &basenow);
 		}
-		base++;
 	}
 
 	/*
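
The three hunks above are the heart of the change: enqueue_hrtimer() sets a bit in cpu_base->active_bases when a base receives a timer, __remove_hrtimer() clears the bit once the last timer on that base is gone, and hrtimer_interrupt() consults the mask so it no longer walks (and touches the cache lines of) bases that have nothing queued. A self-contained sketch of the same bookkeeping, with an invented struct layout standing in for the real hrtimer structures:

    #include <stdio.h>

    #define MAX_BASES 3

    struct clock_base { int nr_queued; };

    struct cpu_base {
        unsigned int active_bases;               /* bit i set <=> base i has timers */
        struct clock_base clock_base[MAX_BASES];
    };

    static void enqueue(struct cpu_base *cb, int i)
    {
        cb->clock_base[i].nr_queued++;
        cb->active_bases |= 1U << i;             /* as in enqueue_hrtimer() */
    }

    static void remove_one(struct cpu_base *cb, int i)
    {
        if (--cb->clock_base[i].nr_queued == 0)  /* queue ran empty */
            cb->active_bases &= ~(1U << i);      /* as in __remove_hrtimer() */
    }

    int main(void)
    {
        struct cpu_base cb = { 0 };
        int i;

        enqueue(&cb, 1);                         /* one timer, on base 1 only */
        for (i = 0; i < MAX_BASES; i++) {
            if (!(cb.active_bases & (1U << i)))
                continue;                        /* skip, as in hrtimer_interrupt() */
            printf("scanning base %d\n", i);
        }
        remove_one(&cb, 1);
        return 0;
    }

With only three bases a full scan is already cheap; the win is that the interrupt path no longer dirties or reads the timerqueue heads of bases that are empty, which matters on a path that runs on every timer interrupt.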
@@ -1525,7 +1533,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 	struct timespec __user *rmtp;
 	int ret = 0;
 
-	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
 				HRTIMER_MODE_ABS);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
@@ -1577,7 +1585,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
-	restart->nanosleep.index = t.timer.base->index;
+	restart->nanosleep.clockid = t.timer.base->clockid;
 	restart->nanosleep.rmtp = rmtp;
 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
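
These last two hunks are a correctness fix, not a cleanup: the nanosleep restart block must record the clockid explicitly, because base->index no longer doubles as one. Given the reordering in the first hunk, confusing the two values would silently swap clocks, as this compilable illustration shows (enum values assumed to match the clock_base[] order above):

    #include <stdio.h>
    #include <time.h>  /* CLOCK_REALTIME == 0, CLOCK_MONOTONIC == 1 on Linux */

    /* Assumed to match the clock_base[] order in the first hunk. */
    enum { BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME };

    int main(void)
    {
        int saved = BASE_REALTIME;  /* what the old code stored: a base index */

        /* Reading it back as a clockid picks the wrong clock: */
        if (saved == CLOCK_MONOTONIC)
            printf("restart would sleep on CLOCK_MONOTONIC, "
                   "not CLOCK_REALTIME!\n");
        return 0;
    }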