@@ -333,24 +333,6 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
-static void blk_invoke_request_fn(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-
-	/*
-	 * one level of recursion is ok and is much faster than kicking
-	 * the unplug handling
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
-}
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -365,7 +347,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	blk_invoke_request_fn(q);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -425,12 +407,23 @@ void __blk_run_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
 
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
+	if (elv_queue_empty(q))
+		return;
+
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!elv_queue_empty(q))
-		blk_invoke_request_fn(q);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+		q->request_fn(q);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
+	} else {
+		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+		kblockd_schedule_work(q, &q->unplug_work);
+	}
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
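For reference, this is how __blk_run_queue() reads once the last hunk is applied, i.e. with blk_invoke_request_fn()'s body folded in. It is assembled only from the context and '+' lines of the diff above (surrounding kerneldoc omitted); no new logic is introduced.

void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	/* Nothing to do if the queue is stopped or has no pending requests. */
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (elv_queue_empty(q))
		return;

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}
EXPORT_SYMBOL(__blk_run_queue);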