@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
@@ -4433,7 +4433,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
 		__schedule_bug(prev);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
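
Both hunks apply the same cleanup: a compound condition that was split across
two unlikely() annotations is folded into a single hint. For reference, here
is a minimal, self-contained userspace sketch of the after-patch form. The
likely()/unlikely() macros are modeled on the kernel's include/linux/compiler.h,
the SCHED_* constants mirror the kernel ABI values, and the main() driver is
purely illustrative, not part of the patch.

	#include <stdio.h>

	/* Branch-prediction hints, as in include/linux/compiler.h. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* Realtime policy values; these mirror the kernel ABI. */
	#define SCHED_FIFO	1
	#define SCHED_RR	2

	/* After the patch: one hint covers the whole compound condition. */
	static inline int rt_policy(int policy)
	{
		if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
			return 1;
		return 0;
	}

	int main(void)
	{
		printf("SCHED_RR is realtime: %d\n", rt_policy(SCHED_RR));
		return 0;
	}

The truth value is unchanged: unlikely(a) || unlikely(b) and unlikely(a || b)
take the same branch. The merged form attaches a single expectation to the
combined result instead of hinting each subexpression separately, and it reads
closer to the intent that the condition as a whole is the cold path.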