@@ -2280,6 +2280,9 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+/*
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
 	int dest_cpu;
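
The hunk above shows only the head of select_fallback_rq(); its body is elided here. As a sketch of the rule the new comment states, assuming only the stock cpumask helpers (cpumask_any_and(), cpumask_any(), cpu_active_mask, nr_cpu_ids), a fallback pick that relies on the pinned mask could look roughly like this; it is illustrative, not the committed body:

/*
 * Illustrative sketch only, not the committed body: because the caller
 * holds TASK_WAKING or rq->lock, ->cpus_allowed cannot change while we
 * scan it, so no further locking is needed. The cpu parameter is kept
 * to mirror the real signature.
 */
static int select_fallback_rq_sketch(int cpu, struct task_struct *p)
{
	/* Any still-active CPU the task is allowed on? */
	int dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);

	if (dest_cpu < nr_cpu_ids)
		return dest_cpu;

	/* Affinity excludes every active CPU: punt to any active one. */
	return cpumask_any(cpu_active_mask);
}
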
@@ -2316,12 +2319,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Gets called from 3 sites (exec, fork, wakeup), since it is called without
- * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
- * by:
- *
- *  exec:           is unstable, retry loop
- *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
+ * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
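
The rewritten comment captures the new contract: the two remaining callers, fork and wakeup, set TASK_WAKING while still holding rq->lock, and an affinity update cannot complete against a TASK_WAKING task, so ->cpus_allowed stays stable across the unlocked call. A simplified sketch of that caller shape, with a hypothetical function name and the locking compressed; this is not the kernel's actual wakeup path:

/* Sketch only: shape of a wakeup-side caller, not the kernel's ttwu(). */
static int wake_side_sketch(struct task_struct *p, int wake_flags)
{
	unsigned long flags;
	struct rq *rq = task_rq_lock(p, &flags);
	int cpu;

	p->state = TASK_WAKING;		/* pins ->cpus_allowed ... */
	task_rq_unlock(rq, &flags);	/* ... so the lock can be dropped */

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);

	rq = task_rq_lock(p, &flags);	/* retake to enqueue on the choice */
	/* ... set_task_cpu()/activate_task() would follow here ... */
	task_rq_unlock(rq, &flags);
	return cpu;
}
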
@@ -3076,9 +3074,8 @@ void sched_exec(void)
 	unsigned long flags;
 	struct rq *rq;
 
-again:
 	this_cpu = get_cpu();
-	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == this_cpu) {
 		put_cpu();
 		return;
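
sched_exec() now invokes the class hook directly instead of the select_task_rq() wrapper: per the comment rewritten in the second hunk, the wrapper's unlocked use of ->cpus_allowed is only safe for callers that own TASK_WAKING, which a running task calling exec does not. For orientation, the wrapper's shape after this series is roughly the following; condensed and illustrative, with the fallback test assumed rather than quoted:

/*
 * Illustrative shape of the wrapper; exec bypasses it because the
 * unlocked ->cpus_allowed test below is only safe under TASK_WAKING.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	if (!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
	    unlikely(!cpu_active(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}
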
@@ -3086,18 +3083,12 @@ again:
 
 	rq = task_rq_lock(p, &flags);
 	put_cpu();
-
 	/*
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-	    || unlikely(!cpu_active(dest_cpu))) {
-		task_rq_unlock(rq, &flags);
-		goto again;
-	}
-
-	/* force the process onto the specified CPU */
-	if (migrate_task(p, dest_cpu, &req)) {
+	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+	    likely(cpu_active(dest_cpu)) &&
+	    migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
 		struct task_struct *mt = rq->migration_thread;
 
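
With the again: label gone, a pick that went stale under us no longer triggers a retry; the merged condition simply falls through to the unlock, since exec-time balancing is opportunistic. Assembled from the two sched_exec() hunks, the post-patch control flow reads roughly as below; the tail that waits on the migration thread is reconstructed from the surrounding context and the usual migration_req idiom of this era, not quoted from the patch:

void sched_exec(void)
{
	struct task_struct *p = current;
	struct migration_req req;
	int dest_cpu, this_cpu;
	unsigned long flags;
	struct rq *rq;

	this_cpu = get_cpu();
	/* Unlocked, possibly stale pick straight from the class hook. */
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == this_cpu) {
		put_cpu();
		return;
	}

	rq = task_rq_lock(p, &flags);
	put_cpu();

	/* Revalidate under rq->lock; a stale pick just falls through. */
	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
	    likely(cpu_active(dest_cpu)) &&
	    migrate_task(p, dest_cpu, &req)) {
		struct task_struct *mt = rq->migration_thread;

		/* Hold a ref: the thread may exit while we sleep. */
		get_task_struct(mt);
		task_rq_unlock(rq, &flags);
		wake_up_process(mt);
		wait_for_completion(&req.done);
		put_task_struct(mt);
		return;
	}

	task_rq_unlock(rq, &flags);
}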