@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	buf[cnt] = 0;
 	cmp = strstrip(buf);
 
-	if (strncmp(buf, "NO_", 3) == 0) {
+	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
 		cmp += 3;
 	}
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.se = (struct sched_entity **)ptr;
+		root_task_group.se = (struct sched_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
+	autogroup_free(tg);
 	kfree(tg);
 }
 
@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);