@@ -914,8 +914,8 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
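
The helper's body falls outside this hunk; for context, a minimal sketch of
what task_is_waking() tests in kernels of this era (only the signature appears
in the hunk above, so treat the exact body as an assumption):

	static inline int task_is_waking(struct task_struct *p)
	{
		/* Nonzero while a waker holds p in the TASK_WAKING window. */
		return unlikely(p->state == TASK_WAKING);
	}
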
@@ -932,11 +932,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 	}
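
After this hunk the lock fast path no longer polls TASK_WAKING at all; the
loop reduces to the classic lock-and-revalidate pattern, retrying only when
the task migrated between reading task_rq() and taking the lock. Read back
from the hunk, the function becomes:

	static inline struct rq *__task_rq_lock(struct task_struct *p)
	{
		struct rq *rq;

		for (;;) {
			/* Snapshot the task's runqueue, then lock it. */
			rq = task_rq(p);
			raw_spin_lock(&rq->lock);
			/*
			 * Still the same rq? Then p cannot migrate away
			 * while we hold its rq->lock.
			 */
			if (likely(rq == task_rq(p)))
				return rq;
			/* Raced with a migration; try again. */
			raw_spin_unlock(&rq->lock);
		}
	}
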
@@ -953,12 +951,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
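
task_rq_lock() is the irq-safe variant of the same loop. A typical caller,
sketched here purely for illustration (the task_rq_unlock() signature matches
the one used in the hunk below):

	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	/* p is pinned to rq here; its scheduling state is stable. */
	task_rq_unlock(rq, &flags);
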
@@ -5262,7 +5258,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	struct rq *rq;
 	int ret = 0;
 
+	/*
+	 * Serialize against TASK_WAKING so that ttwu() and wunt() can
+	 * drop the rq->lock and still rely on ->cpus_allowed.
+	 */
+again:
+	while (task_is_waking(p))
+		cpu_relax();
 	rq = task_rq_lock(p, &flags);
+	if (task_is_waking(p)) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
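
The cost of the TASK_WAKING check thus moves out of every task_rq_lock()
caller and into the one slow path that needs it. (wunt() in the comment is
presumably wake_up_new_task(), the other path that sets TASK_WAKING before
dropping rq->lock.) Note the shape of the hunk: spin outside the lock, then
recheck under the lock, since the flag can be set again between the last poll
and the lock acquisition. A self-contained userspace analogue of that pattern,
with hypothetical names throughout (rq_lock, waking and change_affinity_mask
are stand-ins, not kernel API):

	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>

	static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int waking;	/* stand-in for p->state == TASK_WAKING */

	static void change_affinity_mask(void)
	{
	again:
		/* Spin until the wakeup window appears closed... */
		while (atomic_load(&waking))
			sched_yield();

		pthread_mutex_lock(&rq_lock);

		/*
		 * ...then recheck under the lock: the flag may have been
		 * set again after the last poll, so the spin alone is an
		 * optimization, not the guarantee.
		 */
		if (atomic_load(&waking)) {
			pthread_mutex_unlock(&rq_lock);
			goto again;
		}

		/* No waker is mid-window now; safe to update the mask. */
		pthread_mutex_unlock(&rq_lock);
	}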