@@ -4889,18 +4889,15 @@ static void nohz_balancer_kick(int cpu)
 		return;
 	}
 
-	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-
-		smp_mb();
-		/*
-		 * Use smp_send_reschedule() instead of resched_cpu().
-		 * This way we generate a sched IPI on the target cpu which
-		 * is idle. And the softirq performing nohz idle load balance
-		 * will be run before returning from the IPI.
-		 */
-		smp_send_reschedule(ilb_cpu);
-	}
+	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+		return;
+	/*
+	 * Use smp_send_reschedule() instead of resched_cpu().
+	 * This way we generate a sched IPI on the target cpu which
+	 * is idle. And the softirq performing nohz idle load balance
+	 * will be run before returning from the IPI.
+	 */
+	smp_send_reschedule(ilb_cpu);
 	return;
 }
 
@@ -4964,6 +4961,8 @@ void select_nohz_load_balancer(int stop_tick)
 			}
 			return;
 		}
+
+		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	} else {
 		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 			return;
@@ -5079,8 +5078,9 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 	struct rq *rq;
 	int balance_cpu;
 
-	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
-		return;
+	if (idle != CPU_IDLE ||
+	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
 		if (balance_cpu == this_cpu)
@@ -5091,10 +5091,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 		 * work being done for other cpus. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
-			this_rq->nohz_balance_kick = 0;
+		if (need_resched())
 			break;
-		}
 
 		raw_spin_lock_irq(&this_rq->lock);
 		update_rq_clock(this_rq);
@@ -5108,7 +5106,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 			this_rq->next_balance = rq->next_balance;
 	}
 	nohz.next_balance = this_rq->next_balance;
-	this_rq->nohz_balance_kick = 0;
+end:
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
@@ -5129,10 +5128,17 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	int ret;
 	int first_pick_cpu, second_pick_cpu;
 
-	if (time_before(now, nohz.next_balance))
+	if (unlikely(idle_cpu(cpu)))
 		return 0;
 
-	if (idle_cpu(cpu))
+	/*
+	 * We may be recently in ticked or tickless idle mode. At the first
+	 * busy tick after returning from idle, we will update the busy stats.
+	 */
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))))
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+	if (time_before(now, nohz.next_balance))
 		return 0;
 
 	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
@@ -5196,7 +5202,7 @@ void trigger_load_balance(struct rq *rq, int cpu)
 	    likely(!on_null_domain(cpu)))
 		raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ
-	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
+	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
 		nohz_balancer_kick(cpu);
 #endif
 }
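
The hunks above rely on the nohz_flags() accessor and the NOHZ_TICK_STOPPED / NOHZ_BALANCE_KICK bit indices, which this excerpt does not define; they live in the non-quoted part of the patch. A minimal sketch of what those supporting definitions plausibly look like, inferred from how the bits are used with the atomic bitops here (the enum name, the bit ordering, and the exact placement of the flags word in struct rq are assumptions, not quoted from the patch):

	/*
	 * Sketch only, not the verbatim patch: a per-runqueue flags word
	 * manipulated with set_bit()/clear_bit()/test_and_set_bit().
	 */
	enum rq_nohz_flag_bits {
		NOHZ_TICK_STOPPED,	/* this cpu has stopped its tick for nohz idle */
		NOHZ_BALANCE_KICK,	/* this cpu was asked to run nohz idle balance */
	};

	struct rq {
		/* ... existing runqueue fields ... */
	#ifdef CONFIG_NO_HZ
		unsigned long nohz_flags;	/* replaces the old int nohz_balance_kick */
	#endif
	};

	/* Per-cpu accessor passed to the atomic bitops in the hunks above. */
	#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

Replacing the plain nohz_balance_kick int plus explicit smp_mb() with test_and_set_bit() buys two things: the atomic read-modify-write orders the flag update against the IPI (atomic bitops that return a value imply full barrier semantics), and it makes the kick idempotent, so concurrent callers of nohz_balancer_kick() send at most one rescheduling IPI to the chosen ilb_cpu. On the receiving side, the new end: label ensures NOHZ_BALANCE_KICK is cleared even when the balance pass bails out early via need_resched().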