
sched: fixlet for group load balance

We should not only correct the increment for the initial group, but should
be consistent and do so for all the groups we encounter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra, 16 years ago
commit 940959e939
1 file changed, 14 insertions(+), 13 deletions(-)
kernel/sched_fair.c
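For orientation (this paragraph and the snippet are editorial, not part of the patch): effective_load() walks up the task-group hierarchy and, at each level, recomputes wl from the group's shares and runqueue weight. A minimal sketch of that single per-level step as it looks after this patch, with abbreviated names and none of the kernel types, is:

	/*
	 * Rough model of one iteration of the loop below.
	 * S  - the group's total shares (tg->shares)
	 * s  - this CPU's share of the group (se->my_q->shares)
	 * rw - the group's runqueue weight on this CPU
	 * wl - weight being added on this CPU
	 * wg - weight being added to the group
	 */
	static long one_level(long S, long s, long rw, long wl, long wg)
	{
		long a = S * (rw + wl);
		long b = S * rw + s * wg;

		wl = s * (a - b);
		if (b)		/* b can be zero, so guard the division */
			wl /= b;

		return wl;
	}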

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c

@@ -1027,7 +1027,6 @@ static long effective_load(struct task_group *tg, int cpu,
 		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
-	long more_w;
 
 	if (!tg->parent)
 		return wl;
@@ -1039,18 +1038,17 @@ static long effective_load(struct task_group *tg, int cpu,
 	if (!wl && sched_feat(ASYM_EFF_LOAD))
 		return wl;
 
-	/*
-	 * Instead of using this increment, also add the difference
-	 * between when the shares were last updated and now.
-	 */
-	more_w = se->my_q->load.weight - se->my_q->rq_weight;
-	wl += more_w;
-	wg += more_w;
-
 	for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
-
 		long S, rw, s, a, b;
+		long more_w;
+
+		/*
+		 * Instead of using this increment, also add the difference
+		 * between when the shares were last updated and now.
+		 */
+		more_w = se->my_q->load.weight - se->my_q->rq_weight;
+		wl += more_w;
+		wg += more_w;
 
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
@@ -1059,7 +1057,11 @@ static long effective_load(struct task_group *tg, int cpu,
 		a = S*(rw + wl);
 		b = S*rw + s*wg;
 
-		wl = s*(a-b)/D(b);
+		wl = s*(a-b);
+
+		if (likely(b))
+			wl /= b;
+
 		/*
 		 * Assume the group is already running and will
 		 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@ static long effective_load(struct task_group *tg, int cpu,
 		 * alter the group weight.
 		 */
 		wg = 0;
-#undef D
 	}
 
 	return wl;
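
To make the shape of the fix concrete, here is a rough standalone model of the patched loop. It is hypothetical: the hierarchy is flattened into a plain array, every struct and field name is invented, and it is not the kernel code. The point of the patch shows in the body: the load.weight - rq_weight correction is applied for every group on the way up, not just before the first one, and the division is guarded explicitly instead of going through the old D() macro.

	#include <stdio.h>

	/* One level of the (invented) group hierarchy, bottom-up. */
	struct level {
		long tg_shares;		/* S: total shares of the task group      */
		long cpu_shares;	/* s: this CPU's share of the group       */
		long rq_weight;		/* rw: group runqueue weight on this CPU  */
		long load_weight;	/* current load.weight, possibly newer    */
	};

	static long effective_load_model(const struct level *lv, int nr,
					 long wl, long wg)
	{
		int i;

		for (i = 0; i < nr; i++) {
			long S = lv[i].tg_shares;
			long s = lv[i].cpu_shares;
			long rw = lv[i].rq_weight;
			long a, b;

			/* fold in the stale-shares delta at *every* level */
			long more_w = lv[i].load_weight - lv[i].rq_weight;
			wl += more_w;
			wg += more_w;

			a = S * (rw + wl);
			b = S * rw + s * wg;

			wl = s * (a - b);
			if (b)		/* explicit guard instead of D(b) */
				wl /= b;

			/* the group itself is already accounted above this level */
			wg = 0;
		}

		return wl;
	}

	int main(void)
	{
		struct level hierarchy[] = {
			{ 1024, 512, 2048, 2048 },
			{ 1024, 256, 4096, 4160 },	/* shares updated since rq_weight */
		};

		printf("effective load delta: %ld\n",
		       effective_load_model(hierarchy, 2, 1024, 1024));
		return 0;
	}

As far as the shown hunks go, the explicit branch computes the same value as the removed macro: D(b) divided by 1 when b was zero, which is exactly what skipping the division does; the change in that hunk is readability, while the behavioural change is moving more_w inside the loop.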