@@ -1579,6 +1579,13 @@ static void update_shares(struct sched_domain *sd)
 	walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
 }
 
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+	spin_unlock(&rq->lock);
+	update_shares(sd);
+	spin_lock(&rq->lock);
+}
+
 static void update_h_load(int cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
@@ -1595,6 +1602,10 @@ static inline void update_shares(struct sched_domain *sd)
 {
 }
 
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
 #endif
 
 #endif
@@ -3543,6 +3554,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
+	update_shares_locked(this_rq, sd);
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, cpus, NULL);
 	if (!group) {
@@ -3586,6 +3598,7 @@ redo:
 	} else
 		sd->nr_balance_failed = 0;
 
+	update_shares_locked(this_rq, sd);
 	return ld_moved;
 
 out_balanced: