@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
 
+
+/* __put_ioctx
+ * Called when the last user of an aio context has gone away,
+ * and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+ unsigned nr_events = ctx->max_reqs;
+
+ BUG_ON(ctx->reqs_active);
+
+ cancel_delayed_work(&ctx->wq);
+ cancel_work_sync(&ctx->wq.work);
+ aio_free_ring(ctx);
+ mmdrop(ctx->mm);
+ ctx->mm = NULL;
+ pr_debug("__put_ioctx: freeing %p\n", ctx);
+ kmem_cache_free(kioctx_cachep, ctx);
+
+ if (nr_events) {
+ spin_lock(&aio_nr_lock);
+ BUG_ON(aio_nr - nr_events > aio_nr);
+ aio_nr -= nr_events;
+ spin_unlock(&aio_nr_lock);
+ }
+}
+
+#define get_ioctx(kioctx) do { \
+ BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+ atomic_inc(&(kioctx)->users); \
+} while (0)
+#define put_ioctx(kioctx) do { \
+ BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+ if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
+ __put_ioctx(kioctx); \
+} while (0)
+
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
*/
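
The get_ioctx()/put_ioctx() macros added above implement a plain atomic reference count on the kioctx: get_ioctx() takes a reference, put_ioctx() drops one, and only the caller that drops the last reference ends up in __put_ioctx() to tear the context down. As a rough illustration of the idiom (not kernel code; struct obj, get_obj() and put_obj() are made-up names), a stand-alone C11 sketch might look like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct obj {
        atomic_int users;               /* reference count */
    };

    /* Runs only once, when the final reference is dropped. */
    static void __put_obj(struct obj *o)
    {
        printf("freeing %p\n", (void *)o);
        free(o);
    }

    #define get_obj(o)  atomic_fetch_add(&(o)->users, 1)
    #define put_obj(o)  do {                                    \
        if (atomic_fetch_sub(&(o)->users, 1) == 1)              \
            __put_obj(o);                                       \
    } while (0)

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (!o)
            return 1;
        atomic_init(&o->users, 1);      /* creator's reference */
        get_obj(o);                     /* a second user takes a reference */
        put_obj(o);                     /* 2 -> 1: object stays alive */
        put_obj(o);                     /* 1 -> 0: __put_obj() frees it */
        return 0;
    }

The BUG_ON(atomic_read(&(kioctx)->users) <= 0) checks in the real macros are sanity assertions that the count is never used after it has already reached zero.
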
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm)
}
}
 
-/* __put_ioctx
- * Called when the last user of an aio context has gone away,
- * and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
- unsigned nr_events = ctx->max_reqs;
-
- BUG_ON(ctx->reqs_active);
-
- cancel_delayed_work(&ctx->wq);
- cancel_work_sync(&ctx->wq.work);
- aio_free_ring(ctx);
- mmdrop(ctx->mm);
- ctx->mm = NULL;
- pr_debug("__put_ioctx: freeing %p\n", ctx);
- kmem_cache_free(kioctx_cachep, ctx);
-
- if (nr_events) {
- spin_lock(&aio_nr_lock);
- BUG_ON(aio_nr - nr_events > aio_nr);
- aio_nr -= nr_events;
- spin_unlock(&aio_nr_lock);
- }
-}
-
/* aio_get_req
* Allocate a slot for an aio request. Increments the users count
* of the kioctx so that the kioctx stays around until all requests are
@@ -545,7 +556,7 @@ int aio_put_req(struct kiocb *req)
/* Lookup an ioctx id. ioctx_list is lockless for reads.
* FIXME: this is O(n) and is only suitable for development.
*/
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
struct kioctx *ioctx;
struct mm_struct *mm;
@@ -1552,7 +1563,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
return 1;
}
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb)
{
struct kiocb *req;
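
The last two hunks change only linkage: lookup_ioctx() and io_submit_one() are no longer referenced outside fs/aio.c, so marking them static gives them internal linkage, meaning the symbols cannot clash with or be called from other translation units, and the compiler can warn if they ever become unused. A minimal stand-alone illustration of the distinction, with made-up names:

    /* helper.c -- compiles on its own with: cc -c helper.c */

    /* Internal linkage: only code in this file can call it. */
    static int internal_helper(int x)
    {
        return x * 2;
    }

    /* External linkage: other .c files may declare and call this. */
    int public_api(int x)
    {
        return internal_helper(x) + 1;
    }
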