@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
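The mutex_clear_owner()/mutex_set_owner() helpers used throughout this patch are defined outside this file (kernel/mutex.h, with a checking variant in kernel/mutex-debug.h for CONFIG_DEBUG_MUTEXES). In the non-debug configuration they presumably reduce to plain stores of the current thread_info, along these lines:

static inline void mutex_set_owner(struct mutex *lock)
{
	/* record who holds the lock, for the spinning heuristic below */
	lock->owner = current_thread_info();
}

static inline void mutex_clear_owner(struct mutex *lock)
{
	lock->owner = NULL;
}

Because the store is not atomic with respect to the lock word itself, the owner field is only a hint; the spinning code later in this patch is written to tolerate it being stale or NULL.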
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
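mutex_set_owner() runs after the fastpath call returns, so by that point the lock is held whether the fast or slow path was taken. The fastpath itself is architecture-specific; as a rough reference, the generic decrement-based variant (asm-generic/mutex-dec.h) looks like:

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 1 -> 0: uncontended acquisition; a result below 0 means contention */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}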
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
@@ -132,10 +147,71 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	unsigned long flags;
 
 	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there are pending waiters, join them.
+		 */
+		if (!list_empty(&lock->wait_list))
+			break;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
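The loop above delegates the actual owner-watching to mutex_spin_on_owner(), which this patchset adds to kernel/sched.c and which is not part of this file. A simplified sketch of its contract, leaving out the care the real version must take against the owner task exiting while its fields are read: it returns non-zero as long as another acquisition attempt looks worthwhile (the owner changed or released the lock), and 0 once spinning should stop (the owner went to sleep, or this CPU needs to reschedule).

int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
	struct rq *rq = cpu_rq(owner->cpu);

	for (;;) {
		/* owner changed or dropped the lock: worth another try */
		if (lock->owner != owner)
			break;

		/* owner stopped running, or we must reschedule: give up */
		if (task_thread_info(rq->curr) != owner || need_resched())
			return 0;

		cpu_relax();
	}
	return 1;
}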
@@ -185,8 +261,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -222,7 +298,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +337,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +373,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
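For callers nothing changes: 0 still means the lock is held (now with the owner recorded for the spinning heuristic), and a negative value still means the sleep was interrupted. A typical caller, using a hypothetical dev->lock, looks like:

	int err = mutex_lock_interruptible(&dev->lock);

	if (err)
		return err;	/* interrupted by a signal */
	/* ... critical section ... */
	mutex_unlock(&dev->lock);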
@@ -352,9 +439,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +468,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
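mutex_trylock() keeps its existing return convention, 1 when the lock was taken and 0 when it was already held, so existing callers are unaffected; only the owner bookkeeping is new. The usual pattern, again with a hypothetical dev->lock:

	if (mutex_trylock(&dev->lock)) {
		/* lock acquired without sleeping */
		/* ... */
		mutex_unlock(&dev->lock);
	}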