@@ -718,7 +718,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq)
+static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
@@ -732,7 +732,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
 	if (!se)
 		return;
 
-	load = cfs_rq->load.weight;
+	load = cfs_rq->load.weight + weight_delta;
 
 	load_weight = atomic_read(&tg->load_weight);
 	load_weight -= cfs_rq->load_contribution;
@@ -754,7 +754,7 @@ static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -881,8 +881,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
+	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
-	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -944,7 +944,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq);
+	update_cfs_shares(cfs_rq, 0);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1177,7 +1177,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq);
+		update_cfs_shares(cfs_rq, 0);
 	}
 
 	hrtick_update(rq);
@@ -1207,7 +1207,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq);
+		update_cfs_shares(cfs_rq, 0);
 	}
 
 	hrtick_update(rq);
@@ -2034,7 +2034,7 @@ static int tg_shares_up(struct task_group *tg, int cpu)
 	 * We need to update shares after updating tg->load_weight in
 	 * order to adjust the weight of groups with long running tasks.
 	 */
-	update_cfs_shares(cfs_rq);
+	update_cfs_shares(cfs_rq, 0);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
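A note on the new weight_delta argument, with a sketch below: at enqueue,
update_cfs_shares() is now called before account_entity_enqueue() and is
passed se->load.weight, so the shares are computed against the anticipated
post-enqueue load before the entity's weight has been added to
cfs_rq->load.weight, and the reweight performed inside update_cfs_shares()
cannot corrupt the freshly accounted queue weight. The remaining callers
pass 0 because cfs_rq->load.weight already reflects the change by the time
they run.

The standalone userspace sketch below models the proportional shares
arithmetic partially visible in the update_cfs_shares() hunk. Only a
fragment of the real function appears above, so the helper name, the
MIN_SHARES floor, and the example numbers are assumptions for illustration,
not the kernel's exact code:

/* shares_sketch.c - model of the shares computation, not kernel code */
#include <stdio.h>

#define MIN_SHARES	2	/* assumed floor on a group's per-cpu shares */

static long calc_shares(long tg_shares, long tg_load_weight,
			long load_contribution, long rq_load,
			long weight_delta)
{
	/* anticipated queue load, including a not-yet-accounted entity */
	long load = rq_load + weight_delta;

	/* swap this cfs_rq's stale global contribution for the fresh load */
	long load_weight = tg_load_weight - load_contribution + load;

	long shares = tg_shares * load;
	if (load_weight)
		shares /= load_weight;

	/* clamp to [MIN_SHARES, tg_shares] */
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* group with 1024 shares; global weight 2048, 1024 from this cpu */
	printf("no delta:   %ld\n", calc_shares(1024, 2048, 1024, 1024, 0));

	/* same queue, about to enqueue an entity of weight 1024 */
	printf("with delta: %ld\n", calc_shares(1024, 2048, 1024, 1024, 1024));
	return 0;
}

With these numbers the first call yields 512 and the second 682: passing
the pending entity's weight as the delta grows the group's share on this
cpu in the same step that recomputes it, instead of sizing it against a
load value that is about to change.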