@@ -1166,7 +1166,10 @@ retry:
 			break;
 		if (min_nr <= i)
 			break;
-		ret = 0;
+		if (unlikely(ctx->dead)) {
+			ret = -EINVAL;
+			break;
+		}
 		if (to.timed_out)	/* Only check after read evt */
 			break;
 		/* Try to only show up in io wait if there are ops
@@ -1231,6 +1234,13 @@ static void io_destroy(struct kioctx *ioctx)
 
 	aio_cancel_all(ioctx);
 	wait_for_all_aios(ioctx);
+
+	/*
+	 * Wake up any waiters. The setting of ctx->dead must be seen
+	 * by other CPUs at this point. Right now, we rely on the
+	 * locking done by the above calls to ensure this consistency.
+	 */
+	wake_up(&ioctx->wait);
 	put_ioctx(ioctx);	/* once for the lookup */
 }
 
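For readers less familiar with this sleep/wake handshake, the two hunks together implement one pattern: the teardown path sets a shared "dead" flag and then wakes every sleeper, and each waiter re-checks the flag when it wakes so it can bail out with -EINVAL instead of sleeping forever on a context that will never post another event. Below is a minimal, hypothetical user-space sketch of the same idea using pthreads; the names (ctx_dead, waiter, destroy_ctx) are invented for illustration and are not from the patch, and the kernel of course uses wait queues rather than condition variables.

/*
 * Hypothetical user-space sketch of the wake/sleep handshake above.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ctx_wait = PTHREAD_COND_INITIALIZER;
static bool ctx_dead = false;		/* stands in for ctx->dead */

/* Waiter: like read_events(), re-check the flag on every wakeup. */
static void *waiter(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&ctx_lock);
	while (!ctx_dead)
		pthread_cond_wait(&ctx_wait, &ctx_lock);
	pthread_mutex_unlock(&ctx_lock);
	printf("waiter: context destroyed, bailing out\n");
	return NULL;
}

/* Destroyer: like the patched io_destroy(), set the flag, then wake. */
static void destroy_ctx(void)
{
	pthread_mutex_lock(&ctx_lock);
	ctx_dead = true;	/* must be visible before waiters re-check */
	pthread_cond_broadcast(&ctx_wait);	/* analogue of wake_up() */
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	destroy_ctx();
	pthread_join(t, NULL);
	return 0;
}

In this sketch the mutex supplies the ordering guarantee that the kernel comment attributes to the locking inside aio_cancel_all() and wait_for_all_aios(): the store to the dead flag cannot be observed after the wakeup, so a waiter that is woken is guaranteed to see the flag set.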