@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
 	for (i = 0; i < ctx->nr_pages; i++)
 		put_page(ctx->ring_pages[i]);
 
-	if (ctx->mmap_size)
-		vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
 	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
 		kfree(ctx->ring_pages);
 }
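
This hunk pulls the unmap out of aio_free_ring(): vm_munmap() acts on the calling process's address space, so it has to run synchronously in the io_destroy() caller's context (it moves into kill_ioctx() below), not at whatever later point the last reference to the kioctx gets dropped. What remains of aio_free_ring() is pure page cleanup; after the patch the function reads roughly as follows (the declaration of i is assumed from context not shown in the hunk):

	static void aio_free_ring(struct kioctx *ctx)
	{
		long i;

		for (i = 0; i < ctx->nr_pages; i++)
			put_page(ctx->ring_pages[i]);

		if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
			kfree(ctx->ring_pages);
	}
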
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
 
 	aio_free_ring(ctx);
 
-	spin_lock(&aio_nr_lock);
-	BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
-	aio_nr -= ctx->max_reqs;
-	spin_unlock(&aio_nr_lock);
-
 	pr_debug("freeing %p\n", ctx);
 
 	/*
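
The aio_nr accounting likewise leaves free_ioctx(), which only runs once all outstanding kiocbs have completed - potentially long after io_destroy() has returned. Until the decrement happens, the system-wide reservation is still held, which is exactly the spurious -EAGAIN the new comment in kill_ioctx() below describes. For reference, the matching allocation-side check lives in ioctx_alloc() (not part of this patch) and looks roughly like:

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;	/* io_setup() sees -EAGAIN */
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);
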
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
 		hlist_del_rcu(&ctx->list);
-		/* Between hlist_del_rcu() and dropping the initial ref */
-		synchronize_rcu();
 
 		/*
-		 * We can't punt to workqueue here because put_ioctx() ->
-		 * free_ioctx() will unmap the ringbuffer, and that has to be
-		 * done in the original process's context. kill_ioctx_rcu/work()
-		 * exist for exit_aio(), as in that path free_ioctx() won't do
-		 * the unmap.
+		 * It'd be more correct to do this in free_ioctx(), after all
+		 * the outstanding kiocbs have finished - but by then io_destroy
+		 * has already returned, so io_setup() could potentially return
+		 * -EAGAIN with no ioctxs actually in use (as far as userspace
+		 * could tell).
 		 */
-		kill_ioctx_work(&ctx->rcu_work);
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+		aio_nr -= ctx->max_reqs;
+		spin_unlock(&aio_nr_lock);
+
+		if (ctx->mmap_size)
+			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+		/* Between hlist_del_rcu() and dropping the initial ref */
+		call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
 	}
 }
 
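
kill_ioctx() now releases the aio_nr reservation and unmaps the ring synchronously, so both effects are visible by the time io_destroy() returns; only freeing the kioctx itself is deferred past the RCU grace period. Two details worth noting. First, since aio_nr is unsigned, the BUG_ON() condition aio_nr - ctx->max_reqs > aio_nr holds exactly when the subtraction would underflow, i.e. when more is being returned than was ever accounted. Second, with the unmap already done here, the deferred teardown no longer needs the original process's context for it; a sketch of the deferral chain, assuming the pre-existing kill_ioctx_rcu()/kill_ioctx_work() helpers keep their shape:

	static void kill_ioctx_rcu(struct rcu_head *head)
	{
		struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);

		/* punt the final put_ioctx() to a workqueue */
		INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
		schedule_work(&ctx->rcu_work);
	}
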
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		if (!atomic_xchg(&ctx->dead, 1)) {
-			hlist_del_rcu(&ctx->list);
-			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
-		}
+		kill_ioctx(ctx);
 	}
 }
 
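With the common path consolidated, exit_aio() no longer open-codes the dead-flag/unlink/call_rcu sequence. Because ctx->mmap_size is zeroed just above, the vm_munmap() branch inside kill_ioctx() is skipped on this path - correct, since exit_mmap() is about to tear down the whole address space anyway. The loop body reduces to roughly (iteration boilerplate assumed from the unshown context):

	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
		/* ... */
		ctx->mmap_size = 0;
		kill_ioctx(ctx);
	}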