@@ -151,6 +151,7 @@ enum arq_state {
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
 static void as_antic_stop(struct as_data *ad);
@@ -164,8 +165,19 @@ static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
 	elv_ioc_count_dec(ioc_count);
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
+	if (ioc_gone) {
+		/*
+		 * AS scheduler is exiting, grab exit lock and check
+		 * the pending io context count. If it hits zero,
+		 * complete ioc_gone and set it back to NULL.
+		 */
+		spin_lock(&ioc_gone_lock);
+		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+			complete(ioc_gone);
+			ioc_gone = NULL;
+		}
+		spin_unlock(&ioc_gone_lock);
+	}
 }
 
 static void as_trim(struct io_context *ioc)
@@ -1493,7 +1505,7 @@ static void __exit as_exit(void)
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
 	if (elv_ioc_count_read(ioc_count))
-		wait_for_completion(ioc_gone);
+		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
 
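For reference, the last hunk waits on the local completion rather than re-reading ioc_gone, because free_as_io_context() now clears ioc_gone to NULL under ioc_gone_lock once the pending io context count hits zero. A minimal sketch of that exit path, assuming all_gone is a completion declared on the stack in as_exit() and published through ioc_gone before the barrier (only the lines shown in the hunk context above come from the patch; the declaration, assignment and elevator name are assumptions):

static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);	/* assumed local completion */

	elv_unregister(&iosched_as);		/* assumed elevator_type name */
	ioc_gone = &all_gone;			/* publish the completion to freers */
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);	/* last free_as_io_context() completes us */
	synchronize_rcu();
}

Waiting on &all_gone directly matters because, by the time the waiter wakes, the completing side may already have reset ioc_gone to NULL under the lock, so dereferencing ioc_gone again would no longer be safe.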