@@ -1758,10 +1758,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	check_preempt_curr(this_rq, p, 0);
-
-	/* re-arm NEWIDLE balancing when moving tasks */
-	src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
-	this_rq->idle_stamp = 0;
 }
 
 /*
@@ -3219,8 +3215,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 
 	raw_spin_lock(&this_rq->lock);