@@ -274,6 +274,14 @@ struct task_group {
 };
 
 #ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ *	Every UID task group (including init_task_group aka UID-0) will
+ *	be a child to this group.
+ */
+struct task_group root_task_group;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
 #endif
+#else
+#define root_task_group init_task_group
 #endif
 
 /* task_group_lock serializes add/remove of task groups and also changes to
@@ -7507,6 +7517,9 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#endif
+#ifdef CONFIG_USER_SCHED
+	alloc_size *= 2;
 #endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
@@ -7521,12 +7534,29 @@ void __init sched_init(void)
 
 		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.se = (struct sched_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
 #endif
 	}
 
@@ -7540,6 +7570,10 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	init_rt_bandwidth(&init_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
+			global_rt_period(), RUNTIME_INF);
+#endif
 #endif
 
 #ifdef CONFIG_GROUP_SCHED
@@ -7582,6 +7616,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		root_task_group.shares = NICE_0_LOAD;
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
 		/*
 		 * In case of task-groups formed thr' the user id of tasks,
 		 * init_task_group represents tasks belonging to root user.
@@ -7595,7 +7631,8 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group,
 				&per_cpu(init_cfs_rq, i),
-				&per_cpu(init_sched_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_entity, i), i, 1,
+				root_task_group.se[i]);
 
 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -7606,9 +7643,11 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
 		init_tg_rt_entry(&init_task_group,
 				&per_cpu(init_rt_rq, i),
-				&per_cpu(init_sched_rt_entity, i), i, 1, NULL);
+				&per_cpu(init_sched_rt_entity, i), i, 1,
+				root_task_group.rt_se[i]);
 #endif
 #endif
 