sched: simplify SCHED_FEAT_* code

Peter Zijlstra suggested simplifying the SCHED_FEAT_* checks via the
sched_feat(x) macro.

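For readers unfamiliar with the idiom, a minimal standalone sketch of the
pattern follows (the flag names mirror the kernel code, but the bit values
and the main() driver are illustrative, not kernel source):

	#include <stdio.h>

	/* One feature bit per flag; the values here are illustrative. */
	#define SCHED_FEAT_FAIR_SLEEPERS	1
	#define SCHED_FEAT_START_DEBIT		2
	#define SCHED_FEAT_SKIP_INITIAL		4

	unsigned int sysctl_sched_features =
			SCHED_FEAT_FAIR_SLEEPERS	*1 |
			SCHED_FEAT_START_DEBIT		*1 |
			SCHED_FEAT_SKIP_INITIAL		*0;

	/*
	 * Token pasting (##) expands sched_feat(START_DEBIT) into
	 * (sysctl_sched_features & SCHED_FEAT_START_DEBIT).
	 */
	#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

	int main(void)
	{
		if (sched_feat(START_DEBIT))
			printf("START_DEBIT enabled\n");
		if (!sched_feat(SKIP_INITIAL))
			printf("SKIP_INITIAL disabled\n");
		return 0;
	}

Because the macro expands to exactly the expression it replaces, the
generated object code is unchanged, as the size comparison below confirms.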
No code changed:

   text    data     bss     dec     hex filename
   38895    3550      24   42469    a5e5 sched.o.before
   38895    3550      24   42469    a5e5 sched.o.after
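The table is standard size(1) output; assuming sched.o is built at the
parent commit and at the patched commit and saved under the names above,
the comparison can be reproduced with:

   size sched.o.before sched.o.after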

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Peter Zijlstra, 17 years ago
parent commit e59c80c5bb
1 changed file with 7 additions and 5 deletions

+ 7 - 5
kernel/sched_fair.c

@@ -105,6 +105,8 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_SKIP_INITIAL		*0;
 
+#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+
 extern struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -541,14 +543,14 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
 		return;
 
-	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+	if (sched_feat(SLEEPER_LOAD_AVG))
 		load = rq_of(cfs_rq)->cpu_load[2];
 
 	/*
 	 * Fix up delta_fair with the effect of us running
 	 * during the whole sleep period:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+	if (sched_feat(SLEEPER_AVG))
 		delta_fair = div64_likely32((u64)delta_fair * load,
 						load + se->load.weight);
 
@@ -572,7 +574,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	unsigned long delta_fair;
 
 	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
-			 !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+			 !sched_feat(FAIR_SLEEPERS))
 		return;
 
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
@@ -1158,14 +1160,14 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * The first wait is dominated by the child-runs-first logic,
 	 * so do not credit it with that waiting time yet:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
+	if (sched_feat(SKIP_INITIAL))
 		se->wait_start_fair = 0;
 
 	/*
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+	if (sched_feat(START_DEBIT))
 		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
 
 	__enqueue_entity(cfs_rq, se);