@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-	smp_mb__before_clear_bit();
+	void *m = &task->tk_runstate;
+	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&wq->lock, flags);
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+	ret = atomic_dec_and_test(&task->tk_count);
+	if (waitqueue_active(wq))
+		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+	spin_unlock_irqrestore(&wq->lock, flags);
+	return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
 		action = rpc_wait_bit_killable;
-	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
-	/* Release resources */
 	if (task->tk_rqstp)
 		xprt_release(task);
 	if (task->tk_msg.rpc_cred)
 		put_rpccred(task->tk_msg.rpc_cred);
 	rpc_task_release_client(task);
-	if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+		struct workqueue_struct *q)
+{
+	if (q != NULL) {
 		INIT_WORK(&task->u.tk_work, rpc_async_release);
-		queue_work(task->tk_workqueue, &task->u.tk_work);
+		queue_work(q, &task->u.tk_work);
 	} else
 		rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+	if (atomic_dec_and_test(&task->tk_count)) {
+		rpc_release_resources_task(task);
+		rpc_final_put_task(task, q);
+	}
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+	rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+	rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
 	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	BUG_ON (RPC_IS_QUEUED(task));
 
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
+	rpc_release_resources_task(task);
 
-	rpc_put_task(task);
+	/*
+	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+	 * so it should be safe to use task->tk_count as a test for whether
+	 * or not any other processes still hold references to our rpc_task.
+	 */
+	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+		/* Wake up anyone who may be waiting for task completion */
+		if (!rpc_complete_task(task))
+			return;
+	} else {
+		if (!atomic_dec_and_test(&task->tk_count))
+			return;
+	}
+	rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)