
sched: extract walk_tg_tree()

Extract walk_tg_tree() and make it a little more generic so we can use it
in the schedulability test.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra 16 years ago
parent
commit
eb755805f2
1 changed file with 46 additions and 33 deletions

+ 46 - 33
kernel/sched.c

@@ -1387,38 +1387,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
-#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-
-	return rq->avg_load_per_task;
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
  * Iterate the full tree, calling @down when first entering a node and @up when
  * leaving it for the final time.
  */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
+	int ret;
 
 	rcu_read_lock();
 	parent = &root_task_group;
 down:
-	(*down)(parent, cpu, sd);
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1426,14 +1412,42 @@ down:
 up:
 		continue;
 	}
-	(*up)(parent, cpu, sd);
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
+out_unlock:
 	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
 }
+#endif
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
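
The commit message says the walker is being generalized so the schedulability test can reuse it. Below is a minimal, hypothetical sketch of what such a caller could look like against the new interface: each visitor now receives an opaque void * and returns an int, and a non-zero return aborts the walk and is propagated by walk_tg_tree(). The names struct rt_check, rt_over_commits() and check_schedulability() are illustrative only and are not part of this patch; the sketch assumes the definitions in kernel/sched.c after this change.

/*
 * Hypothetical usage sketch, not part of this patch: a "down" visitor
 * that rejects an over-committed group and aborts the tree walk.
 */
struct rt_check {
	u64	rt_period;
	u64	rt_runtime;
};

static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_check *check = data;

	/* rt_over_commits() is an assumed helper, shown for illustration */
	if (rt_over_commits(tg, check->rt_period, check->rt_runtime))
		return -EBUSY;	/* non-zero return stops walk_tg_tree() */

	return 0;
}

static int check_schedulability(u64 period, u64 runtime)
{
	struct rt_check check = {
		.rt_period	= period,
		.rt_runtime	= runtime,
	};

	/* only the down pass does work; tg_nop() ignores the up pass */
	return walk_tg_tree(tg_schedulable, tg_nop, &check);
}

The same opaque-pointer convention is what lets update_h_load() pass a cpu number directly as (void *)cpu and have tg_load_down() cast it back, instead of threading an unused sched_domain argument through every visitor.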