@@ -5396,6 +5396,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
+	u64 curr_cost = 0;
 
 	this_rq->idle_stamp = rq_clock(this_rq);
 
@@ -5412,15 +5413,27 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 		int continue_balancing = 1;
+		u64 t0, domain_cost;
 
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
+		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
+			break;
+
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
+			t0 = sched_clock_cpu(this_cpu);
+
			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance(this_cpu, this_rq,
 						   sd, CPU_NEWLY_IDLE,
 						   &continue_balancing);
+
+			domain_cost = sched_clock_cpu(this_cpu) - t0;
+			if (domain_cost > sd->max_newidle_lb_cost)
+				sd->max_newidle_lb_cost = domain_cost;
+
+			curr_cost += domain_cost;
 		}
 
 		interval = msecs_to_jiffies(sd->balance_interval);
@@ -5442,6 +5455,9 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 		 */
 		this_rq->next_balance = next_balance;
 	}
+
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
 }
 
 /*
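
The patch bounds the time spent in newly-idle balancing: the per-domain field sd->max_newidle_lb_cost records the worst cost ever observed for a balance pass at that domain, and the walk up the domain hierarchy stops as soon as the cost already paid plus the next domain's worst case would exceed the runqueue's expected idle time (this_rq->avg_idle). Below is a minimal user-space model of that heuristic; struct domain, do_balance(), and the nanosecond figures are hypothetical stand-ins for illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for a sched domain: only the worst-case cost field. */
struct domain {
	uint64_t max_newidle_lb_cost;	/* worst balance cost seen, ns */
};

/* Pretend balance pass; returns the cost it incurred, in ns. */
static uint64_t do_balance(int level)
{
	return 1000 * (uint64_t)(level + 1);
}

int main(void)
{
	/* Wider domains are costlier to balance, as in a real topology. */
	struct domain domains[4] = { {1500}, {2500}, {4000}, {9000} };
	uint64_t avg_idle = 6000;	/* expected idle window, ns */
	uint64_t curr_cost = 0;

	for (int i = 0; i < 4; i++) {
		struct domain *sd = &domains[i];

		/* Mirror of the patch's early exit: don't start a pass
		 * whose worst case would overrun the idle window. */
		if (avg_idle < curr_cost + sd->max_newidle_lb_cost) {
			printf("stopping before domain %d\n", i);
			break;
		}

		uint64_t cost = do_balance(i);

		/* Track the worst cost this domain has ever cost us. */
		if (cost > sd->max_newidle_lb_cost)
			sd->max_newidle_lb_cost = cost;
		curr_cost += cost;
		printf("domain %d balanced: cost %llu, total %llu\n",
		       i, (unsigned long long)cost,
		       (unsigned long long)curr_cost);
	}
	return 0;
}

With avg_idle = 6000, the loop balances domains 0 and 1 (total cost 3000) and stops before domain 2, whose recorded worst case (4000) no longer fits in the remaining window: the same trade-off the patch makes between finding work to pull and burning the idle period on the search itself.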