@@ -6516,22 +6516,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and then drop tasklist_lock.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
@@ -6617,7 +6613,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
@@ -6633,7 +6629,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	task_rq_unlock(rq, &flags);
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	put_online_cpus();
 
 	return retval;
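
For reference, here is a minimal sketch (not part of the patch) of the lookup-and-pin pattern both hunks switch to; find_process_by_pid(), get_task_struct() and put_task_struct() are the same kernel helpers that appear in the hunks above:

	struct task_struct *p;

	rcu_read_lock();		/* protects the pid -> task_struct lookup */
	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(p);		/* pin p so it survives past the RCU read section */
	rcu_read_unlock();

	/* ... operate on p without tasklist_lock held ... */

	put_task_struct(p);		/* drop the reference taken above */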