@@ -1811,6 +1811,20 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 
 static void calc_load_account_active(struct rq *this_rq);
 
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -1967,20 +1981,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2552,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
-	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2586,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
 	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-	local_irq_save(flags);
-	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
-	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2625,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
+	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP