@@ -2061,10 +2061,8 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, int dest_cpu)
+static bool migrate_task(struct task_struct *p, struct rq *rq)
 {
-	struct rq *rq = task_rq(p);
-
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
@@ -3224,7 +3222,7 @@ void sched_exec(void)
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
+	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);
@@ -5504,7 +5502,7 @@ again:
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, dest_cpu)) {
+	if (migrate_task(p, rq)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
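
For reference, a sketch of migrate_task() as it reads after this change. The
point of the signature change is visible in the first hunk: every caller must
already hold the task's runqueue lock, so it can pass the rq it locked instead
of having migrate_task() re-derive it via task_rq(p). The function body below
the shown context is not part of the hunks above; the return statement is a
reconstruction from the contemporaneous scheduler code and should be read as
an assumption, not a quote from this patch.

/*
 * Post-patch shape of migrate_task(): a hedged reconstruction.
 * The caller holds task_rq(p)->lock and passes that rq in,
 * removing the redundant task_rq() lookup the old version did.
 */
static bool migrate_task(struct task_struct *p, struct rq *rq)
{
	/*
	 * If the task is not on a runqueue (and not running), then
	 * the next wake-up will properly place the task.
	 */
	return p->se.on_rq || task_running(rq, p);	/* reconstructed */
}

Passing rq also makes the locking contract explicit in the signature: a
caller that cannot supply the locked runqueue has no business calling
migrate_task() in the first place.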