@@ -205,16 +205,15 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc, NULL);
 }
 
-static struct io_context *create_task_io_context(struct task_struct *task,
-						 gfp_t gfp_flags, int node,
-						 bool take_ref)
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
+				int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return NULL;
+		return;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
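The slowpath now returns void, so callers can no longer rely on a returned pointer; they are expected to re-read task->io_context afterwards. The fast-path wrapper that does this is not part of this hunk (in the tree it would live in a block-layer header such as block/blk.h); judging from how the new loop in get_task_io_context() below uses it, it presumably looks something like:

	static inline struct io_context *create_io_context(struct task_struct *task,
							   gfp_t gfp_flags, int node)
	{
		/* only take the allocating slowpath when no ioc is installed yet */
		if (unlikely(!task->io_context))
			create_io_context_slowpath(task, gfp_flags, node);
		/* NULL only if allocation failed or the task is exiting */
		return task->io_context;
	}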
@@ -226,42 +225,13 @@ static struct io_context *create_task_io_context(struct task_struct *task,
 
 	/* try to install, somebody might already have beaten us to it */
 	task_lock(task);
-
-	if (!task->io_context && !(task->flags & PF_EXITING)) {
+	if (!task->io_context && !(task->flags & PF_EXITING))
 		task->io_context = ioc;
-	} else {
+	else
 		kmem_cache_free(iocontext_cachep, ioc);
-		ioc = task->io_context;
-	}
-
-	if (ioc && take_ref)
-		get_io_context(ioc);
-
 	task_unlock(task);
-	return ioc;
 }
-
-/**
- * current_io_context - get io_context of %current
- * @gfp_flags: allocation flags, used if allocation is necessary
- * @node: allocation node, used if allocation is necessary
- *
- * Return io_context of %current. If it doesn't exist, it is created with
- * @gfp_flags and @node. The returned io_context does NOT have its
- * reference count incremented. Because io_context is exited only on task
- * exit, %current can be sure that the returned io_context is valid and
- * alive as long as it is executing.
- */
-struct io_context *current_io_context(gfp_t gfp_flags, int node)
-{
-	might_sleep_if(gfp_flags & __GFP_WAIT);
-
-	if (current->io_context)
-		return current->io_context;
-
-	return create_task_io_context(current, gfp_flags, node, false);
-}
-EXPORT_SYMBOL(current_io_context);
+EXPORT_SYMBOL(create_io_context_slowpath);
 
 /**
  * get_task_io_context - get io_context of a task
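Dropping the return value and take_ref simplifies the lost-race path: the loser just frees its duplicate allocation and leaves task->io_context alone, and anyone who needs the pointer re-reads it under task_lock(). An illustrative interleaving (not part of the patch) that the lock serializes:

	/*
	 * Hypothetical trace of two racers installing an ioc on the same task:
	 *
	 *   racer A                            racer B
	 *   ioc_a = kmem_cache_alloc_node()    ioc_b = kmem_cache_alloc_node()
	 *   task_lock(task)
	 *   task->io_context = ioc_a
	 *   task_unlock(task)
	 *                                      task_lock(task)
	 *                                      task->io_context already set:
	 *                                      kmem_cache_free(ioc_b)
	 *                                      task_unlock(task)
	 */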
@@ -274,7 +244,7 @@ EXPORT_SYMBOL(current_io_context);
  * incremented.
  *
  * This function always goes through task_lock() and it's better to use
- * current_io_context() + get_io_context() for %current.
+ * %current->io_context + get_io_context() for %current.
  */
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node)
@@ -283,16 +253,18 @@ struct io_context *get_task_io_context(struct task_struct *task,
 
 	might_sleep_if(gfp_flags & __GFP_WAIT);
 
-	task_lock(task);
-	ioc = task->io_context;
-	if (likely(ioc)) {
-		get_io_context(ioc);
+	do {
+		task_lock(task);
+		ioc = task->io_context;
+		if (likely(ioc)) {
+			get_io_context(ioc);
+			task_unlock(task);
+			return ioc;
+		}
 		task_unlock(task);
-		return ioc;
-	}
-	task_unlock(task);
+	} while (create_io_context(task, gfp_flags, node));
 
-	return create_task_io_context(task, gfp_flags, node, true);
+	return NULL;
 }
 EXPORT_SYMBOL(get_task_io_context);
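The do/while turns creation into a retry loop: when no ioc is installed, create_io_context() runs the slowpath and the next iteration re-checks under task_lock(), so NULL escapes only when the allocation fails or the target task is exiting. A hypothetical caller, purely to show the contract (the surrounding names are illustrative):

	struct io_context *ioc;

	/* take a reference on task's io_context, creating it on demand */
	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
	if (ioc) {
		/* ... use ioc; the reference keeps it alive ... */
		put_io_context(ioc, NULL);	/* matches the two-arg form above */
	}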