@@ -82,26 +82,26 @@ void exit_io_context(struct task_struct *task)
 }
 
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret;
+	struct io_context *ioc;
 
-	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-	if (ret) {
-		atomic_long_set(&ret->refcount, 1);
-		atomic_set(&ret->nr_tasks, 1);
-		spin_lock_init(&ret->lock);
-		ret->ioprio_changed = 0;
-		ret->ioprio = 0;
-		ret->last_waited = 0; /* doesn't matter... */
-		ret->nr_batch_requests = 0; /* because this is 0 */
-		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
-		INIT_HLIST_HEAD(&ret->cic_list);
-		ret->ioc_data = NULL;
+	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
+	if (ioc) {
+		atomic_long_set(&ioc->refcount, 1);
+		atomic_set(&ioc->nr_tasks, 1);
+		spin_lock_init(&ioc->lock);
+		ioc->ioprio_changed = 0;
+		ioc->ioprio = 0;
+		ioc->last_waited = 0; /* doesn't matter... */
+		ioc->nr_batch_requests = 0; /* because this is 0 */
+		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ioc->cic_list);
+		ioc->ioc_data = NULL;
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-		ret->cgroup_changed = 0;
+		ioc->cgroup_changed = 0;
 #endif
 	}
 
-	return ret;
+	return ioc;
 }
 
 /*
@@ -139,19 +139,19 @@ struct io_context *current_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret = NULL;
+	struct io_context *ioc = NULL;
 
 	/*
 	 * Check for unlikely race with exiting task. ioc ref count is
 	 * zero when ioc is being detached.
 	 */
 	do {
-		ret = current_io_context(gfp_flags, node);
-		if (unlikely(!ret))
+		ioc = current_io_context(gfp_flags, node);
+		if (unlikely(!ioc))
 			break;
-	} while (!atomic_long_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ioc->refcount));
 
-	return ret;
+	return ioc;
 }
 EXPORT_SYMBOL(get_io_context);
 
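For reference, the retry loop in get_io_context() is the classic "inc-unless-zero" lookup: current_io_context() may return an io_context whose last reference is concurrently being dropped by an exiting task, so the reference is taken with atomic_long_inc_not_zero() and the lookup is redone whenever that fails. Below is a minimal userspace sketch of the same pattern using C11 atomics rather than the kernel's atomic_long_t; struct obj, obj_lookup(), obj_tryget() and obj_get() are hypothetical names for illustration, not kernel APIs.

/*
 * Userspace sketch of the inc-unless-zero lookup used by get_io_context().
 * All names here are hypothetical; only the pattern mirrors the patch above.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_long refcount;	/* reaches 0 while the object is being torn down */
};

/* Stand-in for a lockless lookup (e.g. reading current->io_context). */
static struct obj global_obj = { .refcount = 1 };

static struct obj *obj_lookup(void)
{
	return &global_obj;
}

/* inc-unless-zero: take a reference only if the object is still live. */
static bool obj_tryget(struct obj *o)
{
	long old = atomic_load(&o->refcount);

	while (old != 0) {
		/* On failure, 'old' is reloaded and the zero check is redone. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* lost the race with teardown */
}

/* Mirrors get_io_context(): redo the lookup until tryget succeeds. */
static struct obj *obj_get(void)
{
	struct obj *o;

	do {
		o = obj_lookup();
		if (!o)
			return NULL;
	} while (!obj_tryget(o));

	return o;
}

int main(void)
{
	struct obj *o = obj_get();

	assert(o != NULL);
	printf("got object, refcount=%ld\n", atomic_load(&o->refcount));
	return 0;
}

The weak compare-exchange may fail spuriously, which is harmless here since obj_tryget() simply retries; the outer do/while corresponds to get_io_context() redoing the lookup once the object is confirmed dead.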