@@ -6822,7 +6822,6 @@ struct sd_data {
 };
 
 struct s_data {
-	cpumask_var_t		send_covered;
 	struct sched_domain ** __percpu sd;
 	struct sd_data		sdd[SD_LV_MAX];
 	struct root_domain	*rd;
@@ -6832,7 +6831,6 @@ enum s_alloc {
 	sa_rootdomain,
 	sa_sd,
 	sa_sd_storage,
-	sa_send_covered,
 	sa_none,
 };
 
@@ -6853,6 +6851,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	return cpu;
 }
 
+static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
+
 /*
  * build_sched_groups takes the cpumask we wish to span, and a pointer
  * to a function which identifies what group(along with sched group) a CPU
@@ -6864,13 +6864,17 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
  * and ->cpu_power to 0.
  */
 static void
-build_sched_groups(struct sched_domain *sd, struct cpumask *covered)
+build_sched_groups(struct sched_domain *sd)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
 	const struct cpumask *span = sched_domain_span(sd);
+	struct cpumask *covered;
 	int i;
 
+	lockdep_assert_held(&sched_domains_mutex);
+	covered = sched_domains_tmpmask;
+
 	cpumask_clear(covered);
 
 	for_each_cpu(i, span) {
@@ -7015,8 +7019,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_percpu(d->sdd[i].sd);
 		free_percpu(d->sdd[i].sg);
 	} /* fall through */
-	case sa_send_covered:
-		free_cpumask_var(d->send_covered); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7029,8 +7031,6 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 
 	memset(d, 0, sizeof(*d));
 
-	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_none;
 	for (i = 0; i < SD_LV_MAX; i++) {
 		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
 		if (!d->sdd[i].sd)
@@ -7219,7 +7219,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 			if (i != cpumask_first(sched_domain_span(sd)))
 				continue;
 
-			build_sched_groups(sd, d.send_covered);
+			build_sched_groups(sd);
 		}
 	}
 
@@ -7896,6 +7896,7 @@ void __init sched_init(void)
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
 	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
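
The idea behind the patch: build_sched_groups() is only ever called with sched_domains_mutex held, so one statically allocated scratch cpumask (sched_domains_tmpmask, set up once in sched_init()) can stand in for the per-build send_covered mask that struct s_data used to allocate and free on every domain rebuild. Below is a minimal userspace sketch of that pattern only, with hypothetical names and pthreads in place of the kernel's cpumask/lockdep machinery; it is not kernel code.

	/*
	 * Sketch: a persistent scratch buffer shared by every caller of a
	 * build step that is serialized by a single mutex, instead of
	 * allocating a fresh buffer on each invocation.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define NR_CPUS 64

	static pthread_mutex_t domains_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Persistent scratch mask; only touched with domains_mutex held. */
	static unsigned char tmpmask[NR_CPUS];

	/* Caller must hold domains_mutex (the lockdep_assert_held() analogue). */
	static void build_groups(const unsigned char *span)
	{
		unsigned char *covered = tmpmask;	/* reuse, no allocation */

		memset(covered, 0, sizeof(tmpmask));

		for (int i = 0; i < NR_CPUS; i++) {
			if (!span[i] || covered[i])
				continue;
			covered[i] = 1;			/* CPU now accounted for */
			printf("cpu %d starts a new group\n", i);
		}
	}

	int main(void)
	{
		unsigned char span[NR_CPUS] = { [0] = 1, [1] = 1, [4] = 1 };

		pthread_mutex_lock(&domains_mutex);	/* serializes tmpmask users */
		build_groups(span);
		pthread_mutex_unlock(&domains_mutex);
		return 0;
	}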