@@ -4173,70 +4173,53 @@ need_resched:
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
-{
-	unsigned int cpu;
-	struct rq *rq;
 
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	bool ret = false;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Need to access the cpu field knowing that
-	 * DEBUG_PAGEALLOC could have unmapped it if
-	 * the mutex owner just released it and exited.
-	 */
-	if (probe_kernel_address(&owner->cpu, cpu))
-		return 0;
-#else
-	cpu = owner->cpu;
-#endif
+	rcu_read_lock();
+	if (lock->owner != owner)
+		goto fail;
 
 	/*
-	 * Even if the access succeeded (likely case),
-	 * the cpu field may no longer be valid.
+	 * Ensure we emit the owner->on_cpu dereference _after_ checking
+	 * that lock->owner still matches owner. If that fails, owner might
+	 * point to free()d memory; if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
 	 */
-	if (cpu >= nr_cpumask_bits)
-		return 0;
+	barrier();
 
-	/*
-	 * We need to validate that we can do a
-	 * get_cpu() and that we have the percpu area.
-	 */
-	if (!cpu_online(cpu))
-		return 0;
+	ret = owner->on_cpu;
+fail:
+	rcu_read_unlock();
 
-	rq = cpu_rq(cpu);
+	return ret;
+}
 
-	for (;;) {
-		/*
-		 * Owner changed, break to re-assess state.
-		 */
-		if (lock->owner != owner) {
-			/*
-			 * If the lock has switched to a different owner,
-			 * we likely have heavy contention. Return 0 to quit
-			 * optimistic spinning and not contend further:
-			 */
-			if (lock->owner)
-				return 0;
-			break;
-		}
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
 
-		/*
-		 * Is that owner really running on that cpu?
-		 */
-		if (task_thread_info(rq->curr) != owner || need_resched())
+	while (owner_running(lock, owner)) {
+		if (need_resched())
 			return 0;
 
 		arch_mutex_cpu_relax();
 	}
 
+	/*
+	 * If the owner changed to another task there is likely
+	 * heavy contention, stop spinning.
+	 */
+	if (lock->owner)
+		return 0;
+
 	return 1;
 }
 #endif
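
The rewritten helper turns on a single ordering rule: owner->on_cpu may be read only after re-checking that lock->owner still points at the same task, with rcu_read_lock() keeping that task's memory valid in between. The sketch below models that policy in userspace C, assuming hypothetical toy types (struct task, struct toy_mutex) and C11 atomics standing in for the kernel's task_struct, mutex, RCU and barrier(); it is a sketch of the algorithm's shape under those assumptions, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	_Atomic bool on_cpu;		/* stands in for owner->on_cpu */
};

struct toy_mutex {
	struct task *_Atomic owner;	/* stands in for lock->owner */
};

/* Mirrors owner_running(): re-check ownership before reading on_cpu. */
static bool owner_running(struct toy_mutex *lock, struct task *owner)
{
	if (atomic_load(&lock->owner) != owner)
		return false;		/* ownership changed under us */

	/*
	 * The atomic load above stands in for the kernel's barrier() +
	 * RCU pairing: on_cpu is read only after the ownership check.
	 */
	return atomic_load(&owner->on_cpu);
}

/* Mirrors mutex_spin_on_owner(): 1 = worth retrying, 0 = go to sleep. */
static int spin_on_owner(struct toy_mutex *lock, struct task *owner)
{
	while (owner_running(lock, owner))
		;			/* the kernel also tests need_resched() here */

	/* Still owned (same task asleep, or a new owner): block instead. */
	if (atomic_load(&lock->owner))
		return 0;

	return 1;			/* lock was released; retry the fastpath */
}

int main(void)
{
	struct task t = { .on_cpu = false };	/* owner holds the lock but sleeps */
	struct toy_mutex m = { .owner = &t };

	printf("spin result: %d\n", spin_on_owner(&m, &t));	/* prints 0 */
	return 0;
}

The return convention mirrors the patch: 0 means the owner changed hands or went to sleep, so the caller should stop optimistic spinning and block; 1 means the lock was released while we watched, so another acquisition attempt is worthwhile.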