@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag.  Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
+	} else {
+		/* Ordered by draining.  Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
-	}
-
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
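For illustration only, here is a minimal standalone sketch of the drain-based gating the new path uses: a request whose ordered-sequence step lies beyond the step currently in progress is held back (the case where the patch sets *rqp = NULL), while requests at or before the current step may be dispatched. The enum, struct and function names below (enum ordseq, struct fake_rq, may_dispatch) are simplified stand-ins invented for this sketch, not the real block-layer definitions; only blk_ordered_req_seq(), blk_ordered_cur_seq() and QUEUE_ORDERED_TAG above come from the patch itself.

/*
 * Standalone sketch (not kernel code): models the "wait for turn"
 * decision from the non-TAG branch of blk_do_ordered() above.
 */
#include <stdio.h>

enum ordseq {
	ORDSEQ_PREFLUSH  = 1,	/* pre-barrier cache flush step */
	ORDSEQ_BAR       = 2,	/* the barrier request itself */
	ORDSEQ_POSTFLUSH = 3,	/* post-barrier cache flush step */
	ORDSEQ_DRAIN     = 4,	/* ordinary requests queued after the barrier */
};

struct fake_rq {
	const char *name;
	enum ordseq seq;	/* sequence step this request belongs to */
};

/*
 * Return 1 if @rq may be dispatched now, 0 if it must wait until the
 * ordered sequence has advanced to its step.
 */
static int may_dispatch(enum ordseq cur_seq, const struct fake_rq *rq)
{
	return rq->seq <= cur_seq;
}

int main(void)
{
	struct fake_rq rqs[] = {
		{ "pre_flush_rq",  ORDSEQ_PREFLUSH  },
		{ "bar_rq",        ORDSEQ_BAR       },
		{ "post_flush_rq", ORDSEQ_POSTFLUSH },
		{ "later write",   ORDSEQ_DRAIN     },
	};
	enum ordseq cur = ORDSEQ_BAR;	/* pretend the barrier step is active */
	unsigned int i;

	for (i = 0; i < sizeof(rqs) / sizeof(rqs[0]); i++)
		printf("%-13s -> %s\n", rqs[i].name,
		       may_dispatch(cur, &rqs[i]) ?
		       "dispatch" : "hold (wait for turn)");
	return 0;
}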