@@ -1087,8 +1087,14 @@ static inline void update_entity_load_avg(struct sched_entity *se)
 	__update_entity_runnable_avg(rq_of(cfs_rq_of(se))->clock_task, &se->avg,
 				     se->on_rq);
 }
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+}
 #else
 static inline void update_entity_load_avg(struct sched_entity *se) {}
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 #endif

 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2340,8 +2346,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}

-	if (!se)
+	if (!se) {
+		update_rq_runnable_avg(rq, rq->nr_running);
 		inc_nr_running(rq);
+	}
 	hrtick_update(rq);
 }

@@ -2399,8 +2407,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}

-	if (!se)
+	if (!se) {
 		dec_nr_running(rq);
+		update_rq_runnable_avg(rq, 1);
+	}
 	hrtick_update(rq);
 }

@@ -4586,6 +4596,8 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
 		return;

+	update_rq_runnable_avg(this_rq, 1);
+
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
 	 */
@@ -5083,6 +5095,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		cfs_rq = cfs_rq_of(se);
 		entity_tick(cfs_rq, se, queued);
 	}
+
+	update_rq_runnable_avg(rq, 1);
 }

 /*
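
For readers unfamiliar with the load-tracking math behind this hunk, the sketch below is a simplified user-space model of the kind of decayed runnable average that update_rq_runnable_avg() folds the rq into. The struct, helper names, period length and decay factor are illustrative assumptions (the series accounts in roughly 1 ms periods and decays history so that ~32 periods halve a contribution); this is not the kernel's __update_entity_runnable_avg() implementation.

/*
 * Toy model of a per-rq runnable average. All names and constants here
 * are assumptions for illustration, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PERIOD_US	1024		/* ~1 ms accounting period (assumed) */
#define DECAY_NUM	978		/* per-period decay, ~0.978, so 32 periods ~halve (assumed) */
#define DECAY_DEN	1000

struct toy_rq_avg {
	uint64_t last_update_us;
	uint64_t runnable_sum;		/* decayed time spent runnable */
	uint64_t period_sum;		/* decayed total time observed */
};

/* Fold the interval [last_update, now] into the average; 'runnable'
 * describes the rq's state over that interval, mirroring the argument
 * passed to update_rq_runnable_avg() in the patch. */
static void toy_update_runnable_avg(struct toy_rq_avg *a, uint64_t now_us,
				    int runnable)
{
	uint64_t delta = now_us - a->last_update_us;

	a->last_update_us = now_us;

	while (delta >= PERIOD_US) {
		/* Close out one full period, then decay the history. */
		if (runnable)
			a->runnable_sum += PERIOD_US;
		a->period_sum += PERIOD_US;

		a->runnable_sum = a->runnable_sum * DECAY_NUM / DECAY_DEN;
		a->period_sum = a->period_sum * DECAY_NUM / DECAY_DEN;
		delta -= PERIOD_US;
	}

	/* Remainder of the still-open period, not yet decayed. */
	if (runnable)
		a->runnable_sum += delta;
	a->period_sum += delta;
}

int main(void)
{
	struct toy_rq_avg avg = { 0 };

	/* 10 ms runnable, then 10 ms idle, as enqueue/dequeue might observe. */
	toy_update_runnable_avg(&avg, 10000, 1);
	toy_update_runnable_avg(&avg, 20000, 0);

	printf("runnable fraction ~= %.2f\n",
	       (double)avg.runnable_sum / (double)avg.period_sum);
	return 0;
}

Because the 'runnable' argument covers the interval being closed out, this reading is consistent with the call sites above: dequeue_task_fair() passes 1 (the rq was busy right up to the dequeue), enqueue_task_fair() passes the pre-increment rq->nr_running (nonzero only if the rq was already running something), and task_tick_fair()/idle_balance() pass 1 since the rq has been running up to that point.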