|
@@ -170,12 +170,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	for (;;) {
 		struct thread_info *owner;
 
-		/*
-		 * If there are pending waiters, join them.
-		 */
-		if (!list_empty(&lock->wait_list))
-			break;
-
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
@@ -184,6 +178,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
 		/*
 		 * When there's no owner, we might have preempted between the
 		 * owner acquiring the lock and setting the owner field. If
@@ -193,13 +194,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (!owner && (need_resched() || rt_task(task)))
 			break;
 
-		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
-			lock_acquired(&lock->dep_map, ip);
-			mutex_set_owner(lock);
-			preempt_enable();
-			return 0;
-		}
-
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
 		 * everything in this loop to be re-loaded. We don't need