@@ -463,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -474,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -489,8 +489,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
-			preempt_enable();
-			return 0;
+			goto done;
 		}
 		mspin_unlock(MLOCK(lock), &node);
 
@@ -501,7 +500,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -515,6 +514,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -522,9 +525,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -538,7 +538,7 @@ slowpath:
 		 * other waiters:
 		 */
 		if (MUTEX_SHOW_NO_WAITER(lock) &&
-		   (atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -563,24 +563,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						      struct ww_mutex,
-						      base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
 
@@ -594,15 +595,9 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
+done:
 	preempt_enable();
-
 	return 0;
 
 err: