@@ -4306,11 +4306,8 @@ EXPORT_SYMBOL(schedule);
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
-	bool ret = false;
-
-	rcu_read_lock();
 	if (lock->owner != owner)
-		goto fail;
+		return false;
 
 	/*
 	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
@@ -4320,11 +4317,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 	 */
 	barrier();
 
-	ret = owner->on_cpu;
-fail:
-	rcu_read_unlock();
-
-	return ret;
+	return owner->on_cpu;
 }
 
 /*
@@ -4336,21 +4329,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 	if (!sched_feat(OWNER_SPIN))
 		return 0;
 
+	rcu_read_lock();
 	while (owner_running(lock, owner)) {
 		if (need_resched())
-			return 0;
+			break;
 
 		arch_mutex_cpu_relax();
 	}
+	rcu_read_unlock();
 
 	/*
-	 * If the owner changed to another task there is likely
-	 * heavy contention, stop spinning.
+	 * We break out the loop above on need_resched() and when the
+	 * owner changed, which is a sign for heavy contention. Return
+	 * success only when lock->owner is NULL.
	 */
-	if (lock->owner)
-		return 0;
-
-	return 1;
+	return lock->owner == NULL;
 }
 #endif
 