@@ -443,7 +443,7 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
 	req->private = NULL;
 	req->ki_iovec = NULL;
 	INIT_LIST_HEAD(&req->ki_run_list);
-	req->ki_eventfd = ERR_PTR(-EINVAL);
+	req->ki_eventfd = NULL;
 
 	/* Check if the completion queue has enough free space to
 	 * accept an event from this io.
@@ -485,8 +485,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
 	assert_spin_locked(&ctx->ctx_lock);
 
-	if (!IS_ERR(req->ki_eventfd))
-		fput(req->ki_eventfd);
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	if (req->ki_iovec != &req->ki_inline_vec)
@@ -508,8 +506,11 @@ static void aio_fput_routine(struct work_struct *data)
 		list_del(&req->ki_list);
 		spin_unlock_irq(&fput_lock);
 
-		/* Complete the fput */
-		__fput(req->ki_filp);
+		/* Complete the fput(s) */
+		if (req->ki_filp != NULL)
+			__fput(req->ki_filp);
+		if (req->ki_eventfd != NULL)
+			__fput(req->ki_eventfd);
 
 		/* Link the iocb into the context's free list */
 		spin_lock_irq(&ctx->ctx_lock);
@@ -527,12 +528,14 @@ static void aio_fput_routine(struct work_struct *data)
  */
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
+	int schedule_putreq = 0;
+
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
 		req, atomic_long_read(&req->ki_filp->f_count));
 
 	assert_spin_locked(&ctx->ctx_lock);
 
-	req->ki_users --;
+	req->ki_users--;
 	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
 		return 0;
@@ -540,10 +543,23 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/* Must be done under the lock to serialise against cancellation.
-	 * Call this aio_fput as it duplicates fput via the fput_work.
+	/*
+	 * Try to optimize the aio and eventfd file* puts by avoiding the
+	 * need to schedule work when it is not yet __fput() time. In the
+	 * normal case we would not be holding the last reference to the
+	 * file*, so this function will run without any aio kthread wakeup.
 	 */
-	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
+		schedule_putreq++;
+	else
+		req->ki_filp = NULL;
+	if (req->ki_eventfd != NULL) {
+		if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
+			schedule_putreq++;
+		else
+			req->ki_eventfd = NULL;
+	}
+	if (unlikely(schedule_putreq)) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
@@ -1009,7 +1025,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	 * eventfd. The eventfd_signal() function is safe to be called
 	 * from IRQ context.
 	 */
-	if (!IS_ERR(iocb->ki_eventfd))
+	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
 put_rq:
@@ -1608,6 +1624,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
 		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
+			req->ki_eventfd = NULL;
 			goto out_put_req;
 		}
 	}
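
All of the hunks above serve one pattern: the last reference to a file* may be
dropped in a context where running the teardown (__fput()) is not allowed, so
__aio_put_req() only queues the request on fput_head for the aio worker when a
count actually reaches zero, and otherwise clears the pointer so that
aio_fput_routine() knows which puts remain. For readers following along outside
the kernel tree, the same defer-the-last-put idea can be sketched in ordinary
userspace C. This is a minimal illustrative sketch, not kernel code: the names
xobj, xobj_put, and reaper are invented here and stand in loosely for struct
file, __aio_put_req(), and aio_fput_routine().

/* Sketch: drop a reference and, only when it was the last one, hand the
 * object to a worker thread for the real teardown (the fput_work analogue). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct xobj {
	atomic_long count;	/* plays the role of file->f_count */
	struct xobj *next;	/* linkage for the deferred-put list */
	int id;
};

static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t put_cond = PTHREAD_COND_INITIALIZER;
static struct xobj *put_head;	/* plays the role of fput_head */

/* Worker thread: performs the real release in a safe context. */
static void *reaper(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&put_lock);
		while (put_head == NULL)
			pthread_cond_wait(&put_cond, &put_lock);
		struct xobj *obj = put_head;
		put_head = obj->next;
		pthread_mutex_unlock(&put_lock);

		printf("reaper: releasing obj %d\n", obj->id);
		free(obj);	/* the __fput() analogue */
	}
	return NULL;
}

/* Drop one reference; queue the object only if we were the last user,
 * mirroring the schedule_putreq logic in __aio_put_req(). */
static void xobj_put(struct xobj *obj)
{
	if (atomic_fetch_sub(&obj->count, 1) != 1)
		return;		/* not the last ref: no work to schedule */
	pthread_mutex_lock(&put_lock);
	obj->next = put_head;
	put_head = obj;
	pthread_cond_signal(&put_cond);
	pthread_mutex_unlock(&put_lock);
}

int main(void)
{
	pthread_t tid;
	struct xobj *obj = calloc(1, sizeof(*obj));

	atomic_init(&obj->count, 2);
	obj->id = 1;
	pthread_create(&tid, NULL, reaper, NULL);

	xobj_put(obj);		/* count 2 -> 1: nothing to schedule */
	xobj_put(obj);		/* count 1 -> 0: queued for the reaper */
	sleep(1);		/* give the reaper time to run */
	return 0;
}

As in the patch, the common case (not the last reference) takes no lock and
wakes no worker; only the unlikely last-put path pays for the deferral.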