@@ -5486,7 +5486,7 @@ static inline void sched_init_granularity(void)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 {
 	struct migration_req req;
 	unsigned long flags;
@@ -5494,23 +5494,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpus_intersects(new_mask, cpu_online_map)) {
+	if (!cpus_intersects(*new_mask, cpu_online_map)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, &new_mask);
+		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		p->cpus_allowed = new_mask;
-		p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+		p->cpus_allowed = *new_mask;
+		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpu_isset(task_cpu(p), new_mask))
+	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -5523,7 +5523,7 @@ out:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
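
Note for callers: the _ptr variant takes the cpumask by reference rather
than by value, which avoids copying a full cpumask_t onto the stack (the
mask grows with NR_CPUS). A minimal, hypothetical caller-side conversion
sketch; the task pointer p and the mask value are illustrative only, not
part of this patch:

	cpumask_t mask = CPU_MASK_ALL;	/* illustrative mask, not from this patch */
	int ret;

	/* old API: mask passed by value, copied onto the callee's stack */
	ret = set_cpus_allowed(p, mask);

	/* new API: pass a pointer; the callee reads through *new_mask */
	ret = set_cpus_allowed_ptr(p, &mask);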