@@ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p)
 
 	assert_spin_locked(&task_rq(p)->lock);
 
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+	if (test_tsk_need_resched(p))
 		return;
 
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+	set_tsk_need_resched(p);
 
 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())
@@ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu)
 	 * lockless. The worst case is that the other CPU runs the
 	 * idle task through an additional NOOP schedule()
 	 */
-	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(rq->idle);
 
 	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();
@@ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void)
 	 * between schedule and now.
 	 */
 	barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	 * between schedule and now.
 	 */
 	barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
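
These hunks replace open-coded TIF_NEED_RESCHED manipulation with the wrappers from include/linux/sched.h. For reference, those helpers look roughly like the sketch below in kernels of this era; the exact bodies may differ slightly between versions, so treat this as an illustration rather than the authoritative definitions:

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	/* set TIF_NEED_RESCHED in tsk's thread_info flags */
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	/* the unlikely() hint dropped from the call sites lives here */
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int need_resched(void)
{
	/* same test, against the current task's own thread flags */
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

Note that the unlikely() annotations removed from the call sites are preserved inside the wrappers, so the branch hints and generated code should be unchanged; the diff only centralizes TIF_NEED_RESCHED handling behind the sched.h helpers.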