@@ -546,6 +546,7 @@ void tick_setup_sched_timer(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now = ktime_get();
+	u64 offset;
 
 	/*
 	 * Emulate tick processing via per-CPU hrtimers:
@@ -554,8 +555,12 @@ void tick_setup_sched_timer(void)
 	ts->sched_timer.function = tick_sched_timer;
 	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
 
-	/* Get the next period */
+	/* Get the next period (per cpu) */
 	ts->sched_timer.expires = tick_init_jiffy_update();
+	offset = ktime_to_ns(tick_period) >> 1;
+	do_div(offset, NR_CPUS);
+	offset *= smp_processor_id();
+	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
 
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);