@@ -531,7 +531,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 /* __aio_put_req
  *	Returns true if this put was the last user of the request.
  */
-static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+static void __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 		req, atomic_long_read(&req->ki_filp->f_count));
@@ -541,7 +541,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_users--;
 	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
-		return 0;
+		return;
 	list_del(&req->ki_list);		/* remove from active_reqs */
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
@@ -549,21 +549,18 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	fput(req->ki_filp);
 	req->ki_filp = NULL;
 	really_put_req(ctx, req);
-	return 1;
 }
 
 /* aio_put_req
  *	Returns true if this put was the last user of the kiocb,
  *	false if the request is still in use.
  */
-int aio_put_req(struct kiocb *req)
+void aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
-	int ret;
 	spin_lock_irq(&ctx->ctx_lock);
-	ret = __aio_put_req(ctx, req);
+	__aio_put_req(ctx, req);
 	spin_unlock_irq(&ctx->ctx_lock);
-	return ret;
 }
 EXPORT_SYMBOL(aio_put_req);
 
@@ -593,10 +590,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
- *	Returns true if this is the last user of the request.  The
- *	only other user of the request can be the cancellation code.
  */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+void aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx *ctx = iocb->ki_ctx;
 	struct aio_ring_info *info;
@@ -604,7 +599,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	struct io_event *event;
 	unsigned long flags;
 	unsigned long tail;
-	int ret;
 
 	/*
 	 * Special case handling for sync iocbs:
@@ -618,7 +612,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 		iocb->ki_user_data = res;
 		iocb->ki_users = 0;
 		wake_up_process(iocb->ki_obj.tsk);
-		return 1;
+		return;
 	}
 
 	info = &ctx->ring_info;
@@ -677,7 +671,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
-	ret = __aio_put_req(ctx, iocb);
+	__aio_put_req(ctx, iocb);
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -691,7 +685,6 @@ put_rq:
 		wake_up(&ctx->wait);
 
 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	return ret;
 }
 EXPORT_SYMBOL(aio_complete);
 