@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 		se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
 	se->parent = parent;
 }
 #endif
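
A group entity now starts life with zero weight instead of the full
tg->shares; its effective weight is only established once update_cfs_shares()
computes this cpu's portion of the group weight. update_load_set() is the
small helper this series uses for that. A minimal sketch, assuming the
load_weight layout in kernel/sched.c of this era:

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;	/* cached inverse is stale; recompute lazily */
}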

@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static DEFINE_MUTEX(shares_mutex);
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
+	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
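
The __set_se_shares()/set_se_shares() pair is removed: it pinned every
per-cpu entity to the full tg->shares and relied on a later rebalance to fix
the weights up. sched_group_set_shares() now re-evaluates each entity's
weight directly through update_cfs_shares(), which is introduced elsewhere in
this series. A rough sketch of the idea, not the exact kernel code (the
clamping and the load_contribution bookkeeping are assumptions about the rest
of the series): each cpu's entity gets a slice of tg->shares proportional to
how much of the group's load lives on that cpu.

static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
	struct task_group *tg = cfs_rq->tg;
	struct sched_entity *se = tg->se[cpu_of(rq_of(cfs_rq))];
	long load, load_weight, shares;

	if (!se)
		return;

	load = cfs_rq->load.weight + weight_delta;

	/* group-wide load, with this cfs_rq's stale contribution replaced */
	load_weight = atomic_read(&tg->load_weight);
	load_weight -= cfs_rq->load_contribution;
	load_weight += load;

	shares = tg->shares;
	if (load_weight)
		shares = (tg->shares * load) / load_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	reweight_entity(cfs_rq_of(se), se, shares);
}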

@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
 done:
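
Note that "unsigned long flags" moves to function scope because rq->lock is
now taken per cpu inside the loop. The dequeue/retune/enqueue dance from the
removed __set_se_shares() does not disappear; in this series it lives inside
reweight_entity(), which update_cfs_shares() calls once it has the new
weight. A sketch under that assumption (the real body may differ in its
accounting details):

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	/* pull the old weight out of the queue's accounting */
	if (se->on_rq)
		account_entity_dequeue(cfs_rq, se);

	update_load_set(&se->load, weight);

	/* and put the new weight back in */
	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

Walking for_each_sched_entity() from tg->se[i] upward then propagates the
change: retuning one level's weight alters its parent cfs_rq's load, which in
turn alters the share the parent entity deserves.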