@@ -1342,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
			     struct cgroup *cont, struct task_struct *tsk)
 {
 	struct cpuset *cs = cgroup_cs(cont);
-	int ret = 0;
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (tsk->flags & PF_THREAD_BOUND) {
-		mutex_lock(&callback_mutex);
-		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
-			ret = -EINVAL;
-		mutex_unlock(&callback_mutex);
-	}
+	/*
+	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
+	 * cannot change their cpu affinity and isolating such threads by their
+	 * set of allowed nodes is unnecessary. Thus, cpusets are not
+	 * applicable for such threads. This prevents checking for success of
+	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
+	 * be changed.
+	 */
+	if (tsk->flags & PF_THREAD_BOUND)
+		return -EINVAL;
 
-	return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
+	return security_task_setscheduler(tsk, 0, NULL);
 }
 
 static void cpuset_attach(struct cgroup_subsys *ss,