@@ -4620,6 +4620,28 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
+
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+
+		/*
+		 * In order to avoid avg_overlap growing stale when we are
+		 * indeed overlapping and hence not getting put to sleep, grow
+		 * the avg_overlap on preemption.
+		 *
+		 * We use the average preemption runtime because that
+		 * correlates to the amount of cache footprint a task can
+		 * build up.
+		 */
+		update_avg(&prev->se.avg_overlap, runtime);
+	}
+	prev->sched_class->put_prev_task(rq, prev);
+}
+
 /*
  * Pick up the highest-prio task:
  */
@@ -4698,7 +4720,7 @@ need_resched_nonpreemptible:
 	if (unlikely(!rq->nr_running))
 		idle_balance(cpu, rq);
 
-	prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
 
 	if (likely(prev != next)) {
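
For context, update_avg() is not part of this hunk; in kernel/sched.c of this era it is a small exponential moving average with a 1/8 weight, so the clamped preemption runtime is folded gradually into se.avg_overlap rather than replacing it. The standalone sketch below models only that arithmetic under those assumptions; the 0.5 ms migration-cost value, the account_preemption() helper name and the sample numbers are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for sysctl_sched_migration_cost (0.5 ms, in ns). */
static const uint64_t sched_migration_cost = 500000;

/* EWMA with a 1/8 weight, modelling what update_avg() amounts to. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

/* Model of the clamp-and-average step done by put_prev_task() above. */
static void account_preemption(uint64_t *avg_overlap,
			       uint64_t sum_exec, uint64_t prev_sum_exec)
{
	uint64_t runtime = sum_exec - prev_sum_exec;

	if (runtime > 2 * sched_migration_cost)	/* the min_t() clamp */
		runtime = 2 * sched_migration_cost;

	update_avg(avg_overlap, runtime);
}

int main(void)
{
	uint64_t avg_overlap = 0;
	int i;

	/*
	 * A task preempted after 3 ms of runtime, four times in a row:
	 * each round is clamped to 1 ms before being averaged in.
	 */
	for (i = 0; i < 4; i++) {
		account_preemption(&avg_overlap,
				   3000000ULL * (i + 1), 3000000ULL * i);
		printf("avg_overlap = %llu ns\n",
		       (unsigned long long)avg_overlap);
	}
	return 0;
}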