@@ -8443,7 +8443,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8456,8 +8455,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)