@@ -510,108 +510,27 @@ void exit_aio(struct mm_struct *mm)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
-	struct kiocb *req = NULL;
+	struct kiocb *req;
+
+	if (atomic_read(&ctx->reqs_active) >= ctx->ring_info.nr)
+		return NULL;
+
+	if (atomic_inc_return(&ctx->reqs_active) > ctx->ring_info.nr - 1)
+		goto out_put;
 
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
-		return NULL;
+		goto out_put;
 
 	atomic_set(&req->ki_users, 2);
 	req->ki_ctx = ctx;
 
 	return req;
-}
-
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE	32L
-struct kiocb_batch {
-	struct list_head head;
-	long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
-{
-	INIT_LIST_HEAD(&batch->head);
-	batch->count = total;
-}
-
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	struct kiocb *req, *n;
-
-	if (list_empty(&batch->head))
-		return;
-
-	spin_lock_irq(&ctx->ctx_lock);
-	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-		list_del(&req->ki_batch);
-		kmem_cache_free(kiocb_cachep, req);
-		atomic_dec(&ctx->reqs_active);
-	}
-	spin_unlock_irq(&ctx->ctx_lock);
-}
-
-/*
- * Allocate a batch of kiocbs.  This avoids taking and dropping the
- * context lock a lot during setup.
- */
-static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	unsigned short allocated, to_alloc;
-	long avail;
-	struct kiocb *req, *n;
-
-	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
-	for (allocated = 0; allocated < to_alloc; allocated++) {
-		req = __aio_get_req(ctx);
-		if (!req)
-			/* allocation failed, go with what we've got */
-			break;
-		list_add(&req->ki_batch, &batch->head);
-	}
-
-	if (allocated == 0)
-		goto out;
-
-	spin_lock_irq(&ctx->ctx_lock);
-
-	avail = ctx->ring_info.nr - atomic_read(&ctx->reqs_active) - 1;
-	BUG_ON(avail < 0);
-	if (avail < allocated) {
-		/* Trim back the number of requests. */
-		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-			list_del(&req->ki_batch);
-			kmem_cache_free(kiocb_cachep, req);
-			if (--allocated <= avail)
-				break;
-		}
-	}
-
-	batch->count -= allocated;
-	atomic_add(allocated, &ctx->reqs_active);
-
-	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	return allocated;
-}
-
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
-					struct kiocb_batch *batch)
-{
-	struct kiocb *req;
-
-	if (list_empty(&batch->head))
-		if (kiocb_batch_refill(ctx, batch) == 0)
-			return NULL;
-	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
-	list_del(&req->ki_batch);
-	return req;
+out_put:
+	atomic_dec(&ctx->reqs_active);
+	return NULL;
 }
 
 static void kiocb_free(struct kiocb *req)
@@ -1198,8 +1117,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct kiocb_batch *batch,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
 	ssize_t ret;
@@ -1220,7 +1138,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		return -EINVAL;
 	}
 
-	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
+	req = aio_get_req(ctx);  /* returns with 2 references to req */
 	if (unlikely(!req))
 		return -EAGAIN;
 
@@ -1293,7 +1211,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 	long ret = 0;
 	int i = 0;
 	struct blk_plug plug;
-	struct kiocb_batch batch;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1310,8 +1227,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 		return -EINVAL;
 	}
 
-	kiocb_batch_init(&batch, nr);
-
 	blk_start_plug(&plug);
 
 	/*
@@ -1332,13 +1247,12 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
 
-	kiocb_batch_free(ctx, &batch);
 	put_ioctx(ctx);
 	return i ? i : ret;
 }
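
The core of the change is the lock-free slot reservation in the new aio_get_req(): a cheap atomic_read() early-out, an optimistic atomic_inc_return() that claims a slot, and a decrement on the out_put failure path when the claim overshoots the ring (one slot is held back, hence the nr - 1 limit). For reference, below is a minimal userspace sketch of that pattern, assuming C11 atomics in place of the kernel's atomic_t and a toy struct in place of struct kioctx; all names in it (get_req, put_req, struct ctx) are illustrative only, not part of the patch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for struct kioctx / struct kiocb; illustrative only. */
struct ctx {
	atomic_int reqs_active;	/* in-flight reqs, like ctx->reqs_active */
	int nr;			/* ring size, like ctx->ring_info.nr */
};

struct req {
	struct ctx *ctx;
};

static struct req *get_req(struct ctx *ctx)
{
	struct req *req;

	/* Cheap early-out: don't bounce the counter if the ring looks full. */
	if (atomic_load(&ctx->reqs_active) >= ctx->nr)
		return NULL;

	/*
	 * Optimistically claim a slot.  atomic_fetch_add() returns the old
	 * value, so old + 1 plays the role of atomic_inc_return(); if two
	 * callers race past the check above, the loser lands here and backs
	 * out.  One slot (the nr - 1 limit) is held back, as in the patch.
	 */
	if (atomic_fetch_add(&ctx->reqs_active, 1) + 1 > ctx->nr - 1)
		goto out_put;

	req = calloc(1, sizeof(*req));
	if (!req)
		goto out_put;

	req->ctx = ctx;
	return req;

out_put:
	/* Failure path: release the slot claimed above. */
	atomic_fetch_sub(&ctx->reqs_active, 1);
	return NULL;
}

static void put_req(struct req *req)
{
	atomic_fetch_sub(&req->ctx->reqs_active, 1);
	free(req);
}

int main(void)
{
	struct ctx ctx = { .nr = 4 };
	struct req *reqs[8];
	int got = 0;

	for (int i = 0; i < 8; i++)
		if ((reqs[got] = get_req(&ctx)))
			got++;

	/* nr == 4 with one slot held back: exactly 3 claims succeed. */
	printf("got %d requests\n", got);

	while (got--)
		put_req(reqs[got]);
	return 0;
}

Since the new accounting never touches ctx->ctx_lock, there is no lock traffic left for batching to amortize (the old comment gave reducing ctx_lock acquire/release as the sole reason for batching), which is why the whole kiocb_batch machinery can simply be deleted.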