Browse Source

sched: optimize vruntime based scheduling

optimize vruntime based scheduling.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar 17 years ago
parent
commit
6cb5819514
2 changed files with 6 additions and 2 deletions
  1. +3 −2
      kernel/sched.c
  2. +3 −0
      kernel/sched_fair.c

+ 3 - 2
kernel/sched.c

@@ -732,13 +732,14 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	lw->inv_weight = WMULT_CONST / lw->weight;
+	if (sched_feat(FAIR_SLEEPERS))
+		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (likely(lw->weight))
+	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
 		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 

+ 3 - 0
kernel/sched_fair.c

@@ -336,6 +336,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	}
 	curr->vruntime += delta_exec_weighted;
 
+	if (!sched_feat(FAIR_SLEEPERS))
+		return;
+
 	if (unlikely(!load))
 		return;