@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2704,6 +2708,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
+	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
 	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
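
A note on the helper used in the hunks above: blk_queue_dead() is assumed here
to be the simple queue-flag test from include/linux/blkdev.h (a sketch; the
flag name QUEUE_FLAG_DEAD and the exact definition are assumptions based on
that header, not part of this patch):

	/* True once blk_cleanup_queue() has marked the queue dead; new
	 * requests must not be inserted or run after that point. */
	#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)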