@@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_h_load(long cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long now = jiffies;
+
+	if (rq->h_load_throttle == now)
+		return;
+
+	rq->h_load_throttle = now;
+
 	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 	rcu_read_unlock();
@@ -4293,11 +4301,10 @@ redo:
 		env.src_rq    = busiest;
 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
+		update_h_load(env.src_cpu);
+
 more_balance:
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
-		if (!env.loop)
-			update_h_load(env.src_cpu);
 
 		/*
 		 * cur_ld_moved - load moved in current iteration