@@ -178,12 +178,12 @@ try_again_write:
 }
 
 /*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
  */
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
-			 enum rwsem_waiter_type type, signed long adjustment)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
+	enum rwsem_waiter_type type = RWSEM_WAITING_FOR_READ;
+	signed long adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
 	signed long count;
@@ -237,22 +237,64 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	return sem;
 }
 
-/*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-					-RWSEM_ACTIVE_READ_BIAS);
-}
-
 /*
  * wait for the write lock to be granted
  */
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-					-RWSEM_ACTIVE_WRITE_BIAS);
+	enum rwsem_waiter_type type = RWSEM_WAITING_FOR_WRITE;
+	signed long adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk = current;
+	signed long count;
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.type = type;
+	get_task_struct(tsk);
+
+	raw_spin_lock_irq(&sem->wait_lock);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we're now waiting on the lock, but no longer actively locking */
+	count = rwsem_atomic_update(adjustment, sem);
+
+	/* If there are no active locks, wake the front queued process(es) up.
+	 *
+	 * Alternatively, if we're called from a failed down_write(), there
+	 * were already threads queued before us and there are no active
+	 * writers, the lock must be read owned; so we try to wake any read
+	 * locks that were queued ahead of us. */
+	if (count == RWSEM_WAITING_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+	else if (count > RWSEM_WAITING_BIAS &&
+		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+
+	raw_spin_unlock_irq(&sem->wait_lock);
+
+	/* wait to be given the lock */
+	while (true) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (!waiter.task)
+			break;
+
+		raw_spin_lock_irq(&sem->wait_lock);
+		/* Try to get the writer sem, may steal from the head writer: */
+		if (type == RWSEM_WAITING_FOR_WRITE)
+			if (try_get_writer_sem(sem, &waiter)) {
+				raw_spin_unlock_irq(&sem->wait_lock);
+				return sem;
+			}
+		raw_spin_unlock_irq(&sem->wait_lock);
+		schedule();
+	}
+
+	tsk->state = TASK_RUNNING;
+
+	return sem;
 }
 
 /*