@@ -754,61 +754,6 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 	return 0;
 }
 
-/*
- * For a given cpuset cur, partition the system as follows
- * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * b. All cpus in the current cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * Build these two partitions by calling partition_sched_domains
- *
- * Call with manage_mutex held.  May nest a call to the
- * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
- * Must not be called holding callback_mutex, because we must
- * not call lock_cpu_hotplug() while holding callback_mutex.
- */
-
-static void update_cpu_domains(struct cpuset *cur)
-{
-	struct cpuset *c, *par = cur->parent;
-	cpumask_t pspan, cspan;
-
-	if (par == NULL || cpus_empty(cur->cpus_allowed))
-		return;
-
-	/*
-	 * Get all cpus from parent's cpus_allowed not part of exclusive
-	 * children
-	 */
-	pspan = par->cpus_allowed;
-	list_for_each_entry(c, &par->children, sibling) {
-		if (is_cpu_exclusive(c))
-			cpus_andnot(pspan, pspan, c->cpus_allowed);
-	}
-	if (!is_cpu_exclusive(cur)) {
-		cpus_or(pspan, pspan, cur->cpus_allowed);
-		if (cpus_equal(pspan, cur->cpus_allowed))
-			return;
-		cspan = CPU_MASK_NONE;
-	} else {
-		if (cpus_empty(pspan))
-			return;
-		cspan = cur->cpus_allowed;
-		/*
-		 * Get all cpus from current cpuset's cpus_allowed not part
-		 * of exclusive children
-		 */
-		list_for_each_entry(c, &cur->children, sibling) {
-			if (is_cpu_exclusive(c))
-				cpus_andnot(cspan, cspan, c->cpus_allowed);
-		}
-	}
-
-	lock_cpu_hotplug();
-	partition_sched_domains(&pspan, &cspan);
-	unlock_cpu_hotplug();
-}
-
 /*
  * Call with manage_mutex held.  May take callback_mutex during call.
  */
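
For readers skimming the removal above: the deleted helper's partition step is plain mask subtraction. A minimal userspace sketch of that arithmetic, with an unsigned long standing in for cpumask_t; the toy_cpuset struct and parent_span() name are invented for illustration, not kernel API:

#include <stdio.h>

struct toy_cpuset {
	unsigned long cpus_allowed;	/* bit N set => cpu N allowed */
	int cpu_exclusive;
};

/* pspan: the parent's cpus minus every exclusive child's cpus */
static unsigned long parent_span(unsigned long parent_cpus,
				 const struct toy_cpuset *children, int n)
{
	unsigned long pspan = parent_cpus;
	int i;

	for (i = 0; i < n; i++)
		if (children[i].cpu_exclusive)
			pspan &= ~children[i].cpus_allowed;	/* cpus_andnot() */
	return pspan;
}

int main(void)
{
	struct toy_cpuset kids[] = {
		{ .cpus_allowed = 0x3, .cpu_exclusive = 1 },	/* cpus 0-1, exclusive */
		{ .cpus_allowed = 0xc, .cpu_exclusive = 0 },	/* cpus 2-3, not */
	};

	/* parent owns cpus 0-7; only the exclusive child is subtracted */
	printf("pspan = %#lx\n", parent_span(0xff, kids, 2));	/* prints 0xfc */
	return 0;
}
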
@@ -816,7 +761,7 @@ static void update_cpu_domains(struct cpuset *cur)
 static int update_cpumask(struct cpuset *cs, char *buf)
 {
 	struct cpuset trialcs;
-	int retval, cpus_unchanged;
+	int retval;
 
 	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
 	if (cs == &top_cpuset)
@@ -843,12 +788,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
 		return retval;
-	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
 	mutex_lock(&callback_mutex);
 	cs->cpus_allowed = trialcs.cpus_allowed;
 	mutex_unlock(&callback_mutex);
-	if (is_cpu_exclusive(cs) && !cpus_unchanged)
-		update_cpu_domains(cs);
 	return 0;
 }
 
@@ -1085,7 +1027,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 {
 	int turning_on;
 	struct cpuset trialcs;
-	int err, cpu_exclusive_changed;
+	int err;
 
 	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
 
@@ -1098,14 +1040,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 	err = validate_change(cs, &trialcs);
 	if (err < 0)
 		return err;
-	cpu_exclusive_changed =
-		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
 	mutex_lock(&callback_mutex);
 	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
-	if (cpu_exclusive_changed)
-		update_cpu_domains(cs);
 	return 0;
 }
 
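
Note that both update_cpumask() and update_flag() keep the same shape after the change: copy the cpuset into a trial, mutate the trial, validate_change(), then commit under callback_mutex. A userspace sketch of that trial-copy/validate/commit pattern; toy_set, toy_validate() and set_lock are invented stand-ins (a pthread mutex plays the role of callback_mutex):

#include <errno.h>
#include <pthread.h>

struct toy_set {
	unsigned long flags;
};

static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;

static int toy_validate(const struct toy_set *cur, const struct toy_set *trial)
{
	return trial->flags ? 0 : -EINVAL;	/* stand-in sanity check */
}

static int toy_update_flag(struct toy_set *s, unsigned long bit, int on)
{
	struct toy_set trial = *s;		/* work on a copy ... */
	int err;

	if (on)
		trial.flags |= bit;
	else
		trial.flags &= ~bit;

	err = toy_validate(s, &trial);		/* ... validate the copy ... */
	if (err < 0)
		return err;

	pthread_mutex_lock(&set_lock);		/* ... commit under the lock */
	s->flags = trial.flags;
	pthread_mutex_unlock(&set_lock);
	return 0;
}

int main(void)
{
	struct toy_set s = { .flags = 0x1 };

	return toy_update_flag(&s, 0x2, 1);	/* sets bit 0x2, returns 0 */
}
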
@@ -1965,17 +1903,6 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
 }
 
-/*
- * Locking note on the strange update_flag() call below:
- *
- * If the cpuset being removed is marked cpu_exclusive, then simulate
- * turning cpu_exclusive off, which will call update_cpu_domains().
- * The lock_cpu_hotplug() call in update_cpu_domains() must not be
- * made while holding callback_mutex.  Elsewhere the kernel nests
- * callback_mutex inside lock_cpu_hotplug() calls.  So the reverse
- * nesting would risk an ABBA deadlock.
- */
-
 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
 	struct cpuset *cs = dentry->d_fsdata;
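
The deleted comment names the hazard it was guarding against: an ABBA deadlock from nesting two locks in opposite orders. A minimal, self-contained userspace illustration with pthreads; lock_a and lock_b are invented stand-ins for lock_cpu_hotplug() and callback_mutex, and this program may genuinely hang, which is the point:

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *thread1(void *arg)		/* nests B inside A: A -> B */
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return arg;
}

static void *thread2(void *arg)		/* reverse nesting: B -> A */
{
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);	/* can block forever against thread1 */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return arg;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, thread1, NULL);
	pthread_create(&t2, NULL, thread2, NULL);
	pthread_join(t1, NULL);		/* if each thread holds one lock and */
	pthread_join(t2, NULL);		/* waits on the other, neither returns */
	return 0;
}
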
@@ -1995,13 +1922,6 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 		mutex_unlock(&manage_mutex);
 		return -EBUSY;
 	}
-	if (is_cpu_exclusive(cs)) {
-		int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
-		if (retval < 0) {
-			mutex_unlock(&manage_mutex);
-			return retval;
-		}
-	}
 	parent = cs->parent;
 	mutex_lock(&callback_mutex);
 	set_bit(CS_REMOVED, &cs->flags);