@@ -95,6 +95,52 @@ void __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * Mutex spinning code migrated from kernel/sched/core.c
+ */
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	if (lock->owner != owner)
+		return false;
+
+	/*
+	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
+	 * lock->owner still matches owner, if that fails, owner might
+	 * point to free()d memory, if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+	 */
+	barrier();
+
+	return owner->on_cpu;
+}
+
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+static noinline
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	rcu_read_lock();
+	while (owner_running(lock, owner)) {
+		if (need_resched())
+			break;
+
+		arch_mutex_cpu_relax();
+	}
+	rcu_read_unlock();
+
+	/*
+	 * We break out the loop above on need_resched() and when the
+	 * owner changed, which is a sign for heavy contention. Return
+	 * success only when lock->owner is NULL.
+	 */
+	return lock->owner == NULL;
+}
+#endif
+
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**