@@ -409,6 +409,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }
 
+/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before.  On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
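
For context, a minimal sketch of how a caller might pair the two new helpers around a queue-policy update; the surrounding function name is hypothetical and not part of this patch:

	/* assumes <linux/blkdev.h> for struct request_queue and the bypass helpers */
	static void update_queue_policy(struct request_queue *q)
	{
		/* stop feeding the elevator and drain already-issued requests */
		blk_queue_bypass_start(q);

		/*
		 * ... modify elevator or policy state here; only the dispatch
		 * FIFO of @q is in use and no request carries ELVPRIV ...
		 */

		/* drop our bypass reference; normal queueing resumes at depth 0 */
		blk_queue_bypass_end(q);
	}

The hunk below then switches request allocation over to the new bypass test.
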
@@ -862,8 +898,7 @@ retry:
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
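
The blk_queue_bypass(q) test used above is presumably a queue-flag helper introduced alongside QUEUE_FLAG_BYPASS in <linux/blkdev.h>, along the lines of:

	/* assumed helper, analogous to the other queue_flags test macros */
	#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)

so get_request() skips ELVPRIV setup whenever the bypass depth is non-zero, matching the drain guarantee documented for blk_queue_bypass_start().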