@@ -3854,12 +3854,13 @@ int __init blk_dev_init(void)
 }
 
 /*
- * IO Context helper functions
+ * IO Context helper functions. put_io_context() returns 1 if there are no
+ * more users of this io context, 0 otherwise.
  */
-void put_io_context(struct io_context *ioc)
+int put_io_context(struct io_context *ioc)
 {
 	if (ioc == NULL)
-		return;
+		return 1;
 
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
@@ -3878,7 +3879,9 @@ void put_io_context(struct io_context *ioc)
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
+		return 1;
 	}
+	return 0;
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -3893,15 +3896,17 @@ void exit_io_context(void)
 	current->io_context = NULL;
 	task_unlock(current);
 
-	ioc->task = NULL;
-	if (ioc->aic && ioc->aic->exit)
-		ioc->aic->exit(ioc->aic);
-	if (ioc->cic_root.rb_node != NULL) {
-		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
-		cic->exit(ioc);
-	}
+	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+		if (ioc->aic && ioc->aic->exit)
+			ioc->aic->exit(ioc->aic);
+		if (ioc->cic_root.rb_node != NULL) {
+			cic = rb_entry(rb_first(&ioc->cic_root),
+				       struct cfq_io_context, rb_node);
+			cic->exit(ioc);
+		}
 
-	put_io_context(ioc);
+		put_io_context(ioc);
+	}
 }
 
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
@@ -3911,7 +3916,8 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
-		ret->task = current;
+		atomic_set(&ret->nr_tasks, 1);
+		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
 		ret->ioprio = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
@@ -3959,10 +3965,18 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret;
-	ret = current_io_context(gfp_flags, node);
-	if (likely(ret))
-		atomic_inc(&ret->refcount);
+	struct io_context *ret = NULL;
+
+	/*
+	 * Check for unlikely race with exiting task. ioc ref count is
+	 * zero when ioc is being detached.
+	 */
+	do {
+		ret = current_io_context(gfp_flags, node);
+		if (unlikely(!ret))
+			break;
+	} while (!atomic_inc_not_zero(&ret->refcount));
+
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);