
Merge branch 'unlikely/sched' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into sched/urgent

Ingo Molnar 14 years ago
parent
commit
e197f094b7
2 changed files with 4 additions and 4 deletions
  1. +2 -2
      kernel/sched.c
  2. +2 -2
      kernel/sched_rt.c

+ 2 - 2
kernel/sched.c

@@ -124,7 +124,7 @@
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
 		return 1;
 	return 0;
 }
@@ -2486,7 +2486,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	if (p->sched_class->task_woken)
 		p->sched_class->task_woken(rq, p);
 
-	if (unlikely(rq->idle_stamp)) {
+	if (rq->idle_stamp) {
 		u64 delta = rq->clock - rq->idle_stamp;
 		u64 max = 2*sysctl_sched_migration_cost;
 

+ 2 - 2
kernel/sched_rt.c

@@ -1126,7 +1126,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
 	rt_rq = &rq->rt;
 
-	if (unlikely(!rt_rq->rt_nr_running))
+	if (!rt_rq->rt_nr_running)
 		return NULL;
 
 	if (rt_rq_throttled(rt_rq))
@@ -1544,7 +1544,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
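
For reference, a minimal stand-alone sketch of what the dropped unlikely() annotation does, assuming the standard likely()/unlikely() definitions from include/linux/compiler.h; rt_policy_hinted() and the hard-coded SCHED_* values are hypothetical stand-ins mirroring the rt_policy() hunk above, not kernel code:

#include <stdio.h>

/*
 * Sketch of the kernel's branch-prediction hints (include/linux/compiler.h).
 * The !! normalises the expression to 0 or 1 before it is passed to the
 * compiler as the expected value of the condition.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Policy values as in the kernel's sched.h uapi (assumed here). */
#define SCHED_FIFO	1
#define SCHED_RR	2

/* Same shape as the rt_policy() hunk, with the hint still in place. */
static inline int rt_policy_hinted(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;	/* compiler lays this out as the cold path */
	return 0;
}

int main(void)
{
	printf("%d %d\n", rt_policy_hinted(SCHED_FIFO), rt_policy_hinted(0));
	return 0;
}

The hint only biases which side of the branch the compiler treats as the hot path; removing it, as these hunks do, drops that bias without changing behaviour.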