@@ -8846,7 +8846,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8868,6 +8868,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8879,12 +8904,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8934,19 +8959,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
  * If doms_new == NULL it will be replaced with cpu_online_mask.
  * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8954,8 +8979,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -8974,40 +8998,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 					dattr_new ? dattr_new + i : NULL);
 match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;