@@ -31,7 +31,7 @@
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding xtime_lock:
+	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	/* Reevaluate with jiffies_lock held */
+	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
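
For reference, the write_seqlock()/write_sequnlock() pair above is what the
lockless readers retry against. A minimal sketch of the reader side, modeled
on the kernel's get_jiffies_64() (which takes jiffies_lock after this change);
illustration only, not part of the patch:

u64 get_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		/* Retry if the writer above ran during the read section. */
		seq = read_seqbegin(&jiffies_lock);
		ret = jiffies_64;
	} while (read_seqretry(&jiffies_lock, seq));
	return ret;
}
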
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }
 
 /*
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 	return period;
 }
 
+
+static void tick_sched_do_timer(ktime_t now)
+{
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themselves to
+	 * this duty, then the jiffies update is still serialized by
+	 * jiffies_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		tick_do_timer_cpu = cpu;
+#endif
+
+	/* Check if the jiffies need an update */
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too many ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+#endif
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
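
The two helpers added above are the common core of both tick handlers: the
low-resolution tick_nohz_handler() and the high-resolution tick_sched_timer()
(see the hunks below) both reduce to the same two calls. A hypothetical
composite for illustration only; any_tick_handler_body() is not a real
kernel function:

static void any_tick_handler_body(struct tick_sched *ts, ktime_t now)
{
	struct pt_regs *regs = get_irq_regs();

	/* Take over and perform the jiffies update, if this cpu owns it. */
	tick_sched_do_timer(now);

	/* Per-cpu tick work; regs may be NULL outside hard irq context. */
	if (regs)
		tick_sched_handle(ts, regs);
}
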
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
@@ -652,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
-	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_do_timer(now);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -806,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		container_of(timer, struct tick_sched, sched_timer);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	int cpu = smp_processor_id();
 
-#ifdef CONFIG_NO_HZ
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-#endif
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
+	tick_sched_do_timer(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
 
@@ -878,7 +863,7 @@ void tick_setup_sched_timer(void)
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
-	/* Offset the tick to avert xtime_lock contention. */
+	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
 		u64 offset = ktime_to_ns(tick_period) >> 1;
 		do_div(offset, num_possible_cpus());
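
A worked example of the skew arithmetic above: each possible cpu gets an
equal slot within half a tick period, so the per-cpu ticks (and hence the
jiffies_lock acquisitions) are staggered rather than simultaneous. In
mainline the computed offset is then multiplied by the cpu number; that
step falls outside the context shown in this hunk, so the sketch below
assumes it. tick_skew_ns() is a hypothetical standalone helper, for
illustration only:

static u64 tick_skew_ns(int cpu, u64 period_ns, unsigned int ncpus)
{
	u64 offset = period_ns >> 1;	/* spread ticks across half a period */

	do_div(offset, ncpus);		/* width of one cpu's slot */
	return offset * cpu;		/* this cpu's offset into the period */
}

/*
 * Example with HZ=1000 (period_ns = 1000000) and 4 possible cpus:
 * slot = 500000 / 4 = 125000 ns, so cpu0 fires at +0us, cpu1 at
 * +125us, cpu2 at +250us and cpu3 at +375us.
 */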