@@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 	}
 
 	spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
@@ -2115,6 +2116,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->se.on_rq && !task_running(rq, p)) {
+		update_rq_clock(rq);
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -2376,14 +2378,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
+	if (cpu != orig_cpu) {
+		local_irq_save(flags);
+		rq = cpu_rq(cpu);
+		update_rq_clock(rq);
 		set_task_cpu(p, cpu);
-
+		local_irq_restore(flags);
+	}
 	rq = task_rq_lock(p, &flags);
 
-	if (rq != orig_rq)
-		update_rq_clock(rq);
-
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2545,6 +2548,7 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
+	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2581,7 +2585,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
+	local_irq_save(flags);
+	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
+	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
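
Each hunk enforces the same invariant: before set_task_cpu() moves a task, the destination runqueue's clock has to be brought up to date, and that has to happen with interrupts disabled. As an illustrative sketch only (the helper name is made up and is not part of this patch; it assumes it would sit in kernel/sched.c next to the functions touched above, where cpu_rq() and update_rq_clock() are visible), the pattern condenses to:

	/*
	 * Illustrative only, not part of this patch: the clock/IRQ pattern
	 * the hunks above apply around set_task_cpu().
	 */
	static void set_task_cpu_clocked(struct task_struct *p, unsigned int cpu)
	{
		unsigned long flags;

		local_irq_save(flags);		/* set_task_cpu() must run with IRQs off */
		update_rq_clock(cpu_rq(cpu));	/* refresh the destination rq clock first */
		set_task_cpu(p, cpu);
		local_irq_restore(flags);
	}

Callers that already hold the runqueue lock with interrupts disabled, such as kthread_bind() and migrate_task() above, only need the bare update_rq_clock() call, which is why those hunks add a single line.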