@@ -216,14 +216,15 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	/* wait until we successfully acquire the lock */
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (true) {
-
-		/* Try acquiring the write lock. */
-		count = RWSEM_ACTIVE_WRITE_BIAS;
-		if (!list_is_singular(&sem->wait_list))
-			count += RWSEM_WAITING_BIAS;
-		if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
 							RWSEM_WAITING_BIAS)
-			break;
+				break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
@@ -231,7 +232,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 		do {
 			schedule();
 			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		} while (sem->count & RWSEM_ACTIVE_MASK);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
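For readers who want to try the idea outside the kernel, here is a minimal user-space sketch of the pattern the hunks above introduce: take a snapshot of the counter, attempt the compare-and-swap only when the snapshot shows no active lockers, and refresh the snapshot after every wakeup. The names (sem_count, try_grab_write) and the bias values are made up for illustration; they are not the kernel's RWSEM_* constants or semantics.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ACTIVE_MASK   0x0000ffffL   /* low bits count active lockers (illustrative) */
#define WAITING_BIAS  (-0x10000L)   /* present while waiters are queued (illustrative) */
#define WRITE_BIAS    0x1L          /* one active writer (illustrative) */

static _Atomic long sem_count = WAITING_BIAS;  /* a waiter is queued, no active lockers */

/* Attempt to grab the write lock only when the snapshot shows no active lockers. */
static bool try_grab_write(long snapshot, bool sole_waiter)
{
	if (snapshot & ACTIVE_MASK)
		return false;                /* lockers still active: skip the cmpxchg */

	long want = WRITE_BIAS;              /* become the active writer */
	if (!sole_waiter)
		want += WAITING_BIAS;        /* other waiters remain queued */

	long expect = WAITING_BIAS;          /* succeed only if nothing changed under us */
	return atomic_compare_exchange_strong(&sem_count, &expect, want);
}

int main(void)
{
	/* The snapshot stands in for the count value re-read after each wakeup. */
	long snapshot = atomic_load(&sem_count);
	printf("write lock acquired: %d\n", try_grab_write(snapshot, true));
	return 0;
}

The pre-check in the sketch plays the same role as the added if (!(count & RWSEM_ACTIVE_MASK)) guard in the patch: it avoids issuing a compare-and-swap that cannot succeed while other lockers are still active, and the updated do/while condition keeps the snapshot fresh so that the next iteration tests current state rather than a stale value.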