@@ -487,14 +487,14 @@ struct rt_rq {
  */
 struct root_domain {
 	atomic_t refcount;
-	cpumask_t span;
-	cpumask_t online;
+	cpumask_var_t span;
+	cpumask_var_t online;
 
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
-	cpumask_t rto_mask;
+	cpumask_var_t rto_mask;
 	atomic_t rto_count;
 #ifdef CONFIG_SMP
 	struct cpupri cpupri;
@@ -6444,7 +6444,7 @@ static void set_rq_online(struct rq *rq)
 	if (!rq->online) {
 		const struct sched_class *class;
 
-		cpu_set(rq->cpu, rq->rd->online);
+		cpumask_set_cpu(rq->cpu, rq->rd->online);
 		rq->online = 1;
 
 		for_each_class(class) {
@@ -6464,7 +6464,7 @@ static void set_rq_offline(struct rq *rq)
 				class->rq_offline(rq);
 		}
 
-		cpu_clear(rq->cpu, rq->rd->online);
+		cpumask_clear_cpu(rq->cpu, rq->rd->online);
 		rq->online = 0;
 	}
 }
@@ -6505,7 +6505,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
 			set_rq_online(rq);
 		}
@@ -6567,7 +6567,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -6768,6 +6768,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 	return 1;
 }
 
+static void free_rootdomain(struct root_domain *rd)
+{
+	free_cpumask_var(rd->rto_mask);
+	free_cpumask_var(rd->online);
+	free_cpumask_var(rd->span);
+	kfree(rd);
+}
+
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
 	unsigned long flags;
@@ -6777,38 +6785,60 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		if (cpu_isset(rq->cpu, old_rd->online))
+		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
-		cpu_clear(rq->cpu, old_rd->span);
+		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
 		if (atomic_dec_and_test(&old_rd->refcount))
-			kfree(old_rd);
+			free_rootdomain(old_rd);
 	}
 
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	cpu_set(rq->cpu, rd->span);
-	if (cpu_isset(rq->cpu, cpu_online_map))
+	cpumask_set_cpu(rq->cpu, rd->span);
+	if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-static void init_rootdomain(struct root_domain *rd)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	cpus_clear(rd->span);
-	cpus_clear(rd->online);
+	if (bootmem) {
+		alloc_bootmem_cpumask_var(&def_root_domain.span);
+		alloc_bootmem_cpumask_var(&def_root_domain.online);
+		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
+		cpupri_init(&rd->cpupri);
+		return 0;
+	}
+
+	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+		goto free_rd;
+	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+		goto free_span;
+	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+		goto free_online;
 
 	cpupri_init(&rd->cpupri);
+	return 0;
+
+free_online:
+	free_cpumask_var(rd->online);
+free_span:
+	free_cpumask_var(rd->span);
+free_rd:
+	kfree(rd);
+	return -ENOMEM;
 }
 
 static void init_defrootdomain(void)
 {
-	init_rootdomain(&def_root_domain);
+	init_rootdomain(&def_root_domain, true);
+
 	atomic_set(&def_root_domain.refcount, 1);
 }
 
@@ -6820,7 +6850,10 @@ static struct root_domain *alloc_rootdomain(void)
 	if (!rd)
 		return NULL;
 
-	init_rootdomain(rd);
+	if (init_rootdomain(rd, false) != 0) {
+		kfree(rd);
+		return NULL;
+	}
 
 	return rd;
 }
@@ -7632,7 +7665,7 @@ free_sched_groups:
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	kfree(rd);
+	free_rootdomain(rd);
 	goto free_tmpmask;
 #endif
 }
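
Not part of the patch above: for reference, a minimal sketch of the alloc_cpumask_var()/free_cpumask_var() lifecycle that the cpumask_t to cpumask_var_t conversion relies on. With CONFIG_CPUMASK_OFFSTACK=y the mask is allocated separately (so allocation can fail and must later be freed); without it, cpumask_var_t is a plain array and the alloc/free calls are effectively no-ops. That is why the patch has to add -ENOMEM error paths and a free_rootdomain() helper. The struct and function names below are purely illustrative and do not exist in the kernel.

/* Illustrative sketch only, not kernel code. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_domain {			/* hypothetical container, like root_domain */
	cpumask_var_t span;
};

static int example_domain_init(struct example_domain *ed, int cpu)
{
	/* May allocate off-stack storage; failure must be handled. */
	if (!alloc_cpumask_var(&ed->span, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(ed->span);
	cpumask_set_cpu(cpu, ed->span);
	return 0;
}

static void example_domain_destroy(struct example_domain *ed)
{
	free_cpumask_var(ed->span);	/* pairs with alloc_cpumask_var() */
}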