@@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 static void ctx_rcu_free(struct rcu_head *head)
 {
 	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
-	unsigned nr_events = ctx->max_reqs;
-
 	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
 }
 
 /* __put_ioctx
@@ -217,6 +208,7 @@ static void ctx_rcu_free(struct rcu_head *head)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
+	unsigned nr_events = ctx->max_reqs;
 	BUG_ON(ctx->reqs_active);
 
 	cancel_delayed_work(&ctx->wq);
@@ -224,6 +216,12 @@ static void __put_ioctx(struct kioctx *ctx)
 	aio_free_ring(ctx);
 	mmdrop(ctx->mm);
 	ctx->mm = NULL;
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
@@ -247,7 +245,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
 	struct mm_struct *mm;
 	struct kioctx *ctx;
-	int did_sync = 0;
 	int err = -ENOMEM;
 
 	/* Prevent overflows */
@@ -257,7 +254,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if ((unsigned long)nr_events > aio_max_nr)
+	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -281,25 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	do {
-		spin_lock_bh(&aio_nr_lock);
-		if (aio_nr + nr_events > aio_max_nr ||
-		    aio_nr + nr_events < aio_nr)
-			ctx->max_reqs = 0;
-		else
-			aio_nr += ctx->max_reqs;
+	spin_lock_bh(&aio_nr_lock);
+	if (aio_nr + nr_events > aio_max_nr ||
+	    aio_nr + nr_events < aio_nr) {
 		spin_unlock_bh(&aio_nr_lock);
-		if (ctx->max_reqs || did_sync)
-			break;
-
-		/* wait for rcu callbacks to have completed before giving up */
-		synchronize_rcu();
-		did_sync = 1;
-		ctx->max_reqs = nr_events;
-	} while (1);
-
-	if (ctx->max_reqs == 0)
 		goto out_cleanup;
+	}
+	aio_nr += ctx->max_reqs;
+	spin_unlock_bh(&aio_nr_lock);
 
 	/* now link into global list. */
 	spin_lock(&mm->ioctx_lock);
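
The net effect of the patch: aio_nr is now uncharged synchronously in __put_ioctx() rather than from the RCU callback, so ioctx_alloc() no longer needs the synchronize_rcu() retry loop and can fail a doomed charge immediately under the lock. Below is a minimal userspace sketch of the resulting charge/uncharge pattern, assuming a pthread mutex as a stand-in for aio_nr_lock; charge_events()/uncharge_events() are illustrative names, not functions in fs/aio.c.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t nr_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long cur_nr;                  /* aio_nr analogue */
	static const unsigned long max_nr = 0x10000;  /* aio_max_nr analogue */

	/* Reserve nr events up front; mirrors the check in ioctx_alloc(). */
	static int charge_events(unsigned long nr)
	{
		pthread_mutex_lock(&nr_lock);
		/* second clause catches unsigned wraparound, as in the kernel test */
		if (cur_nr + nr > max_nr || cur_nr + nr < cur_nr) {
			pthread_mutex_unlock(&nr_lock);
			return -1;
		}
		cur_nr += nr;
		pthread_mutex_unlock(&nr_lock);
		return 0;
	}

	/* Release a reservation; mirrors the hunk moved into __put_ioctx(). */
	static void uncharge_events(unsigned long nr)
	{
		pthread_mutex_lock(&nr_lock);
		cur_nr -= nr;
		pthread_mutex_unlock(&nr_lock);
	}

	int main(void)
	{
		if (charge_events(128) == 0) {
			printf("charged, cur_nr=%lu\n", cur_nr);
			uncharge_events(128);
		}
		printf("after uncharge, cur_nr=%lu\n", cur_nr);
		return 0;
	}

Because the uncharge is synchronous, a failed charge reflects real usage rather than reservations still parked in pending RCU callbacks, which is what made the old wait-and-retry loop necessary.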