@@ -1265,7 +1265,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 	}
 
 	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
-	update_cfs_shares(cfs_rq);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1475,8 +1474,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	account_entity_enqueue(cfs_rq, se);
 	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+	account_entity_enqueue(cfs_rq, se);
+	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -1549,6 +1549,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
@@ -1568,8 +1569,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
+	se->on_rq = 0;
 	account_entity_dequeue(cfs_rq, se);
-	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1583,7 +1584,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	return_cfs_rq_runtime(cfs_rq);
 
 	update_min_vruntime(cfs_rq);
-	se->on_rq = 0;
+	update_cfs_shares(cfs_rq);
 }
 
 /*
@@ -2595,8 +2596,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
+		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
-		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}
 
 	if (!se) {
@@ -2656,8 +2657,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
+		update_cfs_shares(cfs_rq);
 		update_entity_load_avg(se, 1);
-		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}
 
 	if (!se) {
@@ -5837,11 +5838,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		se = tg->se[i];
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		for_each_sched_entity(se) {
+		for_each_sched_entity(se)
 			update_cfs_shares(group_cfs_rq(se));
-			/* update contribution to parent */
-			update_entity_load_avg(se, 1);
-		}
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
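Taken together, the hunks above remove the update_cfs_shares() call from the period-edge bookkeeping in update_cfs_rq_blocked_load() and restore it to the entity enqueue/dequeue paths, right after the instantaneous weight accounting, so recomputed group shares always see the queue's current weight. Below is a minimal standalone sketch of that restored ordering; every type and helper is a simplified stand-in for illustration, not the kernel's implementation.

/*
 * Standalone sketch of the call ordering restored by this patch.
 * All structs and helpers are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct cfs_rq { long load; };
struct sched_entity { int on_rq; long weight; };

static void enqueue_entity_load_avg(struct cfs_rq *cfs_rq)
{
	puts("update load averages for enqueue");
}

static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->load += se->weight;	/* instantaneous weight joins the queue */
}

static void dequeue_entity_load_avg(struct cfs_rq *cfs_rq)
{
	puts("update load averages for dequeue");
}

static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->load -= se->weight;	/* instantaneous weight leaves the queue */
}

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	/* shares are recomputed against the already-updated queue weight */
	printf("recompute shares, load=%ld\n", cfs_rq->load);
}

static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	enqueue_entity_load_avg(cfs_rq);	/* averages first */
	account_entity_enqueue(cfs_rq, se);	/* then the weight */
	update_cfs_shares(cfs_rq);		/* shares last, seeing the new weight */
	se->on_rq = 1;
}

static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	dequeue_entity_load_avg(cfs_rq);
	se->on_rq = 0;				/* cleared before accounting, as in the patch */
	account_entity_dequeue(cfs_rq, se);
	update_cfs_shares(cfs_rq);		/* shares see the reduced weight */
}

int main(void)
{
	struct cfs_rq cfs_rq = { 0 };
	struct sched_entity se = { 0, 1024 };

	enqueue_entity(&cfs_rq, &se);
	dequeue_entity(&cfs_rq, &se);
	return 0;
}

Built as an ordinary userspace program, the sketch prints the share recomputation happening against the post-accounting load on both enqueue and dequeue, which is the invariant the reordering above preserves.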