@@ -4171,47 +4171,48 @@ static void update_blocked_averages(int cpu)
 }
 
 /*
- * Compute the cpu's hierarchical load factor for each task group.
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static int tg_load_down(struct task_group *tg, void *data)
+static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 {
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->avg.load_avg_contrib;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
-				tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
+	struct rq *rq = rq_of(cfs_rq);
+	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
 	unsigned long now = jiffies;
+	unsigned long load;
 
-	if (rq->h_load_throttle == now)
+	if (cfs_rq->last_h_load_update == now)
 		return;
 
-	rq->h_load_throttle = now;
+	cfs_rq->h_load_next = NULL;
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_load_next = se;
+		if (cfs_rq->last_h_load_update == now)
+			break;
+	}
 
-	rcu_read_lock();
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-	rcu_read_unlock();
+	if (!se) {
+		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->last_h_load_update = now;
+	}
+
+	while ((se = cfs_rq->h_load_next) != NULL) {
+		load = cfs_rq->h_load;
+		load = div64_ul(load * se->avg.load_avg_contrib,
+				cfs_rq->runnable_load_avg + 1);
+		cfs_rq = group_cfs_rq(se);
+		cfs_rq->h_load = load;
+		cfs_rq->last_h_load_update = now;
+	}
 }
 
 static unsigned long task_h_load(struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 
+	update_cfs_rq_h_load(cfs_rq);
 	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
 			cfs_rq->runnable_load_avg + 1);
 }
@@ -4220,10 +4221,6 @@ static inline void update_blocked_averages(int cpu)
 {
 }
 
-static inline void update_h_load(long cpu)
-{
-}
-
 static unsigned long task_h_load(struct task_struct *p)
 {
 	return p->se.avg.load_avg_contrib;
@@ -5108,7 +5105,6 @@ redo:
 		env.src_rq = busiest;
 		env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
-		update_h_load(env.src_cpu);
 more_balance:
 		local_irq_save(flags);
 		double_rq_lock(env.dst_rq, busiest);
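
For readers following the arithmetic: both the removed tg_load_down() and the new update_cfs_rq_h_load() scale a parent's h_load by the fraction of the parent's runnable load that a child group's entity contributes, and task_h_load() then applies the same scaling once more for the task itself. The standalone sketch below is only a toy model of that top-down propagation under invented names (toy_cfs_rq, load_contrib, runnable_load, toy_h_load); it is not kernel code and not part of this patch.

/*
 * Toy model of the top-down h_load propagation shown in the patch.
 * All names here are illustrative; none of these structures or
 * helpers exist in the kernel.
 */
#include <stdio.h>

struct toy_cfs_rq {
	struct toy_cfs_rq *parent;	/* NULL for the root runqueue */
	unsigned long load_contrib;	/* load this group's entity contributes at the parent level */
	unsigned long runnable_load;	/* runnable load queued on this group */
	unsigned long h_load;		/* computed hierarchical load factor */
};

/*
 * Recurse up to the root, then on the way back down scale each parent's
 * h_load by the share of the parent's runnable load that the child
 * group's entity contributes.
 */
static unsigned long toy_h_load(struct toy_cfs_rq *cfs_rq, unsigned long root_load)
{
	if (!cfs_rq->parent) {
		cfs_rq->h_load = root_load;
	} else {
		unsigned long parent_h_load = toy_h_load(cfs_rq->parent, root_load);

		cfs_rq->h_load = parent_h_load * cfs_rq->load_contrib /
				 (cfs_rq->parent->runnable_load + 1);
	}
	return cfs_rq->h_load;
}

int main(void)
{
	/* Root runqueue carrying 1024 units of runnable load. */
	struct toy_cfs_rq root = { .parent = NULL, .runnable_load = 1024 };
	/* A child group contributing 512 of those units, with 512 units queued on it. */
	struct toy_cfs_rq child = {
		.parent = &root,
		.load_contrib = 512,
		.runnable_load = 512,
	};
	unsigned long task_contrib = 256;	/* a task queued on the child group */

	toy_h_load(&child, 2048);		/* 2048 stands in for the root load average */

	printf("child h_load: %lu\n", child.h_load);
	/* Same final scaling as task_h_load(): the task's share of the child's h_load. */
	printf("task h_load:  %lu\n",
	       task_contrib * child.h_load / (child.runnable_load + 1));
	return 0;
}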