@@ -1887,11 +1887,6 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep && p->se.last_wakeup) {
-		update_avg(&p->se.avg_overlap,
-			   p->se.sum_exec_runtime - p->se.last_wakeup);
-		p->se.last_wakeup = 0;
-	}
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2452,15 +2447,6 @@ out_activate:
 	activate_task(rq, p, 1);
 	success = 1;
 
-	/*
-	 * Only attribute actual wakeups done by this task.
-	 */
-	if (!in_interrupt()) {
-		struct sched_entity *se = &current->se;
-
-		se->last_wakeup = se->sum_exec_runtime;
-	}
-
 out_running:
 	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, wake_flags);
@@ -2522,8 +2508,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
-	p->se.last_wakeup		= 0;
-	p->se.avg_overlap		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -3594,23 +3578,6 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
-
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
-		/*
-		 * In order to avoid avg_overlap growing stale when we are
-		 * indeed overlapping and hence not getting put to sleep, grow
-		 * the avg_overlap on preemption.
-		 *
-		 * We use the average preemption runtime because that
-		 * correlates to the amount of cache footprint a task can
-		 * build up.
-		 */
-		update_avg(&prev->se.avg_overlap, runtime);
-	}
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
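For reference, every hunk removed above feeds samples into update_avg(),
the scheduler's small exponentially-weighted moving-average helper. Below
is a minimal standalone sketch of that behaviour, assuming the helper's
usual "move 1/8 of the gap toward the new sample" (diff >> 3) form; the
userspace types and the demo main() are illustrative only and not part of
this patch.

#include <stdint.h>
#include <stdio.h>

/* EWMA step: pull *avg toward sample by 1/8 of the difference. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;	/* signed: sample may be below avg */

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg_overlap = 0;
	int i;

	/* feed a few identical "overlap" samples (nanoseconds) */
	for (i = 0; i < 4; i++) {
		update_avg(&avg_overlap, 800000);
		printf("after sample %d: avg_overlap = %llu\n",
		       i + 1, (unsigned long long)avg_overlap);
	}
	return 0;
}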