|
@@ -29,6 +29,7 @@
|
|
|
#include <linux/fault-inject.h>
|
|
|
#include <linux/list_sort.h>
|
|
|
#include <linux/delay.h>
|
|
|
+#include <linux/ratelimit.h>
|
|
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
|
#include <trace/events/block.h>
|
|
@@ -930,17 +931,6 @@ retry:
|
|
|
rw_flags |= REQ_IO_STAT;
|
|
|
spin_unlock_irq(q->queue_lock);
|
|
|
|
|
|
- /* create icq if missing */
|
|
|
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
|
|
|
- create_io_context(gfp_mask, q->node);
|
|
|
- ioc = rq_ioc(bio);
|
|
|
- if (!ioc)
|
|
|
- goto fail_alloc;
|
|
|
- icq = ioc_create_icq(ioc, q, gfp_mask);
|
|
|
- if (!icq)
|
|
|
- goto fail_alloc;
|
|
|
- }
|
|
|
-
|
|
|
/* allocate and init request */
|
|
|
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
|
|
|
if (!rq)
|
|
@@ -949,17 +939,28 @@ retry:
|
|
|
blk_rq_init(q, rq);
|
|
|
rq->cmd_flags = rw_flags | REQ_ALLOCED;
|
|
|
|
|
|
+ /* init elvpriv */
|
|
|
if (rw_flags & REQ_ELVPRIV) {
|
|
|
- rq->elv.icq = icq;
|
|
|
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
|
|
|
- mempool_free(rq, q->rq.rq_pool);
|
|
|
- goto fail_alloc;
|
|
|
+ if (unlikely(et->icq_cache && !icq)) {
|
|
|
+ create_io_context(gfp_mask, q->node);
|
|
|
+ ioc = rq_ioc(bio);
|
|
|
+ if (!ioc)
|
|
|
+ goto fail_elvpriv;
|
|
|
+
|
|
|
+ icq = ioc_create_icq(ioc, q, gfp_mask);
|
|
|
+ if (!icq)
|
|
|
+ goto fail_elvpriv;
|
|
|
}
|
|
|
- /* @rq->elv.icq holds on to io_context until @rq is freed */
|
|
|
+
|
|
|
+ rq->elv.icq = icq;
|
|
|
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
|
|
|
+ goto fail_elvpriv;
|
|
|
+
|
|
|
+ /* @rq->elv.icq holds io_context until @rq is freed */
|
|
|
if (icq)
|
|
|
get_io_context(icq->ioc);
|
|
|
}
|
|
|
-
|
|
|
+out:
|
|
|
/*
|
|
|
* ioc may be NULL here, and ioc_batching will be false. That's
|
|
|
* OK, if the queue is under the request limit then requests need
|
|
@@ -972,6 +973,24 @@ retry:
|
|
|
trace_block_getrq(q, bio, rw_flags & 1);
|
|
|
return rq;
|
|
|
|
|
|
+fail_elvpriv:
|
|
|
+ /*
|
|
|
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
|
|
|
+ * and may fail indefinitely under memory pressure and thus
|
|
|
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
|
|
|
+ * disturb iosched and blkcg but weird is better than dead.
|
|
|
+ */
|
|
|
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
|
|
|
+ dev_name(q->backing_dev_info.dev));
|
|
|
+
|
|
|
+ rq->cmd_flags &= ~REQ_ELVPRIV;
|
|
|
+ rq->elv.icq = NULL;
|
|
|
+
|
|
|
+ spin_lock_irq(q->queue_lock);
|
|
|
+ rl->elvpriv--;
|
|
|
+ spin_unlock_irq(q->queue_lock);
|
|
|
+ goto out;
|
|
|
+
|
|
|
fail_alloc:
|
|
|
/*
|
|
|
* Allocation failed presumably due to memory. Undo anything we
|