@@ -1831,37 +1831,6 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
         task_rq_unlock(this_rq, &flags);
 }
 
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
-void fastcall sched_exit(struct task_struct *p)
-{
-        unsigned long flags;
-        struct rq *rq;
-
-        /*
-         * If the child was a (relative-) CPU hog then decrease
-         * the sleep_avg of the parent as well.
-         */
-        rq = task_rq_lock(p->parent, &flags);
-        if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
-                p->parent->time_slice += p->time_slice;
-                if (unlikely(p->parent->time_slice > task_timeslice(p)))
-                        p->parent->time_slice = task_timeslice(p);
-        }
-        if (p->sleep_avg < p->parent->sleep_avg)
-                p->parent->sleep_avg = p->parent->sleep_avg /
-                (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-                (EXIT_WEIGHT + 1);
-        task_rq_unlock(rq, &flags);
-}
-
 /**
  * prepare_task_switch - prepare to switch tasks
  * @rq: the runqueue preparing to switch
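
For reference, since the two heuristics disappearing in this hunk are easy to miss in diff form: sched_exit() returned the unused part of a first timeslice the parent had donated at fork time (clamped so no timeslice could be manufactured), and it dragged an interactive parent's sleep_avg down toward a CPU-hog child's. Below is a hedged, user-space sketch of that logic, not the kernel code itself: the struct, the sched_exit_sketch() and timeslice() names, and the EXIT_WEIGHT value (assumed to be 3, matching the kernel/sched.c definition of this era) are simplified stand-ins, and the runqueue locking plus the same-CPU check are deliberately omitted.

/*
 * Hedged sketch of the removed sched_exit() logic. All names and
 * values here are illustrative stand-ins, not the kernel's API;
 * EXIT_WEIGHT is assumed to be 3 as in kernel/sched.c of this era.
 */
#include <stdio.h>

#define EXIT_WEIGHT 3

struct task {
        int first_time_slice;      /* still on the slice donated at fork? */
        unsigned int time_slice;   /* ticks left in the current slice */
        unsigned long sleep_avg;   /* interactivity estimator */
        struct task *parent;
};

/* Stand-in for task_timeslice(p): the full slice the task would get. */
static unsigned int timeslice(const struct task *p)
{
        (void)p;
        return 100;
}

static void sched_exit_sketch(struct task *p)
{
        struct task *parent = p->parent;

        /*
         * Return the unused part of a first timeslice the parent
         * donated at fork, clamped so nothing can be "generated".
         */
        if (p->first_time_slice) {
                parent->time_slice += p->time_slice;
                if (parent->time_slice > timeslice(p))
                        parent->time_slice = timeslice(p);
        }

        /*
         * A child that hogged the CPU relative to its parent pulls
         * the parent's sleep_avg down: 3 parts parent, 1 part child.
         */
        if (p->sleep_avg < parent->sleep_avg)
                parent->sleep_avg = parent->sleep_avg /
                        (EXIT_WEIGHT + 1) * EXIT_WEIGHT +
                        p->sleep_avg / (EXIT_WEIGHT + 1);
}

int main(void)
{
        struct task parent = { 0, 40, 800, NULL };
        struct task child  = { 1, 30, 200, &parent };

        sched_exit_sketch(&child);

        /* time_slice: 40 + 30 = 70 (<= 100); sleep_avg: 800/4*3 + 200/4 = 650 */
        printf("parent: time_slice=%u sleep_avg=%lu\n",
               parent.time_slice, parent.sleep_avg);
        return 0;
}

The odd-looking integer-division order in the weighted average (divide before multiply) mirrors the kernel expression; dividing first presumably keeps the intermediate value from overflowing on 32-bit, at the cost of a little rounding.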