@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -288,8 +290,8 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 
 /**
  * blk_queue_ordered - does this queue support ordered writes
- * @q:     the request queue
- * @flag:  see below
+ * @q:        the request queue
+ * @ordered:  one of QUEUE_ORDERED_*
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -298,28 +300,30 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  *   feature should call this function and indicate so.
  *
  **/
-void blk_queue_ordered(request_queue_t *q, int flag)
-{
-        switch (flag) {
-                case QUEUE_ORDERED_NONE:
-                        if (q->flush_rq)
-                                kmem_cache_free(request_cachep, q->flush_rq);
-                        q->flush_rq = NULL;
-                        q->ordered = flag;
-                        break;
-                case QUEUE_ORDERED_TAG:
-                        q->ordered = flag;
-                        break;
-                case QUEUE_ORDERED_FLUSH:
-                        q->ordered = flag;
-                        if (!q->flush_rq)
-                                q->flush_rq = kmem_cache_alloc(request_cachep,
-                                                               GFP_KERNEL);
-                        break;
-                default:
-                        printk("blk_queue_ordered: bad value %d\n", flag);
-                        break;
+int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+                      prepare_flush_fn *prepare_flush_fn)
+{
+        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+            prepare_flush_fn == NULL) {
+                printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+                return -EINVAL;
+        }
+
+        if (ordered != QUEUE_ORDERED_NONE &&
+            ordered != QUEUE_ORDERED_DRAIN &&
+            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+            ordered != QUEUE_ORDERED_DRAIN_FUA &&
+            ordered != QUEUE_ORDERED_TAG &&
+            ordered != QUEUE_ORDERED_TAG_FLUSH &&
+            ordered != QUEUE_ORDERED_TAG_FUA) {
+                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+                return -EINVAL;
         }
+
+        q->next_ordered = ordered;
+        q->prepare_flush_fn = prepare_flush_fn;
+
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_queue_ordered);
@@ -344,167 +348,265 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 /*
  * Cache flushing for ordered writes handling
  */
-static void blk_pre_flush_end_io(struct request *flush_rq)
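+/*
+ * blk_ordered_cur_seq() reports which step of the ordered sequence the
+ * queue is currently working on: ffz() picks the lowest QUEUE_ORDSEQ_*
+ * bit not yet set in ->ordseq, i.e. the first step that hasn't completed.
+ */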
+inline unsigned blk_ordered_cur_seq(request_queue_t *q)
 {
-        struct request *rq = flush_rq->end_io_data;
-        request_queue_t *q = rq->q;
-
-        elv_completed_request(q, flush_rq);
-
-        rq->flags |= REQ_BAR_PREFLUSH;
-
-        if (!flush_rq->errors)
-                elv_requeue_request(q, rq);
-        else {
-                q->end_flush_fn(q, flush_rq);
-                clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-                q->request_fn(q);
-        }
+        if (!q->ordseq)
+                return 0;
+        return 1 << ffz(q->ordseq);
 }
 
-static void blk_post_flush_end_io(struct request *flush_rq)
+unsigned blk_ordered_req_seq(struct request *rq)
 {
-        struct request *rq = flush_rq->end_io_data;
         request_queue_t *q = rq->q;
 
-        elv_completed_request(q, flush_rq);
+        BUG_ON(q->ordseq == 0);
 
-        rq->flags |= REQ_BAR_POSTFLUSH;
+        if (rq == &q->pre_flush_rq)
+                return QUEUE_ORDSEQ_PREFLUSH;
+        if (rq == &q->bar_rq)
+                return QUEUE_ORDSEQ_BAR;
+        if (rq == &q->post_flush_rq)
+                return QUEUE_ORDSEQ_POSTFLUSH;
 
-        q->end_flush_fn(q, flush_rq);
-        clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
- q->request_fn(q);
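+        /*
+         * Ordinary fs requests are tagged with the queue's ordered color
+         * when queued, and the color is flipped for each barrier.  A
+         * request whose color matches the original barrier's was issued
+         * before it and still counts toward the drain; a differing color
+         * means it was issued after the barrier and doesn't gate the
+         * sequence.
+         */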
+        if ((rq->flags & REQ_ORDERED_COLOR) ==
+            (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+                return QUEUE_ORDSEQ_DRAIN;
+        else
+                return QUEUE_ORDSEQ_DONE;
 }
 
-struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
+void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
 {
-        struct request *flush_rq = q->flush_rq;
-
-        BUG_ON(!blk_barrier_rq(rq));
+        struct request *rq;
+        int uptodate;
 
-        if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
-                return NULL;
+        if (error && !q->orderr)
+                q->orderr = error;
 
-        rq_init(q, flush_rq);
-        flush_rq->elevator_private = NULL;
-        flush_rq->flags = REQ_BAR_FLUSH;
-        flush_rq->rq_disk = rq->rq_disk;
-        flush_rq->rl = NULL;
+        BUG_ON(q->ordseq & seq);
+        q->ordseq |= seq;
 
-        /*
-         * prepare_flush returns 0 if no flush is needed, just mark both
-         * pre and post flush as done in that case
-         */
-        if (!q->prepare_flush_fn(q, flush_rq)) {
-                rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
-                clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-                return rq;
-        }
+        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+                return;
 
         /*
-         * some drivers dequeue requests right away, some only after io
-         * completion. make sure the request is dequeued.
+         * Okay, sequence complete.
          */
-        if (!list_empty(&rq->queuelist))
-                blkdev_dequeue_request(rq);
+        rq = q->orig_bar_rq;
+        uptodate = q->orderr ? q->orderr : 1;
 
-        flush_rq->end_io_data = rq;
-        flush_rq->end_io = blk_pre_flush_end_io;
+        q->ordseq = 0;
 
-        __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-        return flush_rq;
+        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+        end_that_request_last(rq, uptodate);
 }
 
-static void blk_start_post_flush(request_queue_t *q, struct request *rq)
+static void pre_flush_end_io(struct request *rq, int error)
 {
-        struct request *flush_rq = q->flush_rq;
+        elv_completed_request(rq->q, rq);
+        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+}
 
-        BUG_ON(!blk_barrier_rq(rq));
+static void bar_end_io(struct request *rq, int error)
+{
+        elv_completed_request(rq->q, rq);
+        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+}
 
-        rq_init(q, flush_rq);
-        flush_rq->elevator_private = NULL;
-        flush_rq->flags = REQ_BAR_FLUSH;
-        flush_rq->rq_disk = rq->rq_disk;
-        flush_rq->rl = NULL;
+static void post_flush_end_io(struct request *rq, int error)
+{
+        elv_completed_request(rq->q, rq);
+        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+}
 
-        if (q->prepare_flush_fn(q, flush_rq)) {
-                flush_rq->end_io_data = rq;
- flush_rq->end_io = blk_post_flush_end_io;
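+/*
+ * queue_flush() readies the statically allocated pre/post flush request,
+ * lets the driver's prepare_flush_fn() turn it into an actual cache flush
+ * command, and stacks it at the front of the dispatch queue.
+ */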
+static void queue_flush(request_queue_t *q, unsigned which)
+{
+        struct request *rq;
+        rq_end_io_fn *end_io;
 
-        __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-        q->request_fn(q);
+        if (which == QUEUE_ORDERED_PREFLUSH) {
+                rq = &q->pre_flush_rq;
+                end_io = pre_flush_end_io;
+        } else {
+                rq = &q->post_flush_rq;
+                end_io = post_flush_end_io;
         }
+
+        rq_init(q, rq);
+        rq->flags = REQ_HARDBARRIER;
+        rq->elevator_private = NULL;
+        rq->rq_disk = q->bar_rq.rq_disk;
+        rq->rl = NULL;
+        rq->end_io = end_io;
+        q->prepare_flush_fn(q, rq);
+
+        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
-static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
-                                        int sectors)
+static inline struct request *start_ordered(request_queue_t *q,
+                                            struct request *rq)
 {
-        if (sectors > rq->nr_sectors)
-                sectors = rq->nr_sectors;
+        q->bi_size = 0;
+        q->orderr = 0;
+        q->ordered = q->next_ordered;
+        q->ordseq |= QUEUE_ORDSEQ_STARTED;
+
+        /*
+         * Prep proxy barrier request.
+         */
+        blkdev_dequeue_request(rq);
+        q->orig_bar_rq = rq;
+        rq = &q->bar_rq;
+        rq_init(q, rq);
+        rq->flags = bio_data_dir(q->orig_bar_rq->bio);
+        rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+        rq->elevator_private = NULL;
+        rq->rl = NULL;
+        init_request_from_bio(rq, q->orig_bar_rq->bio);
+        rq->end_io = bar_end_io;
+
+        /*
+         * Queue ordered sequence. As we stack them at the head, we
+         * need to queue in reverse order. Note that we rely on that
+         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+         * request gets inbetween ordered sequence.
+         */
+        if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+        else
+                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+
+        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+
+        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+                rq = &q->pre_flush_rq;
+        } else
+                q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-        rq->nr_sectors -= sectors;
- return rq->nr_sectors;
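+        /*
+         * With tag ordering the drive orders the barrier itself, so the
+         * drain step completes immediately.  Otherwise the queue must
+         * drain first: while requests are still in flight, return NULL so
+         * nothing new is dispatched until they finish.
+         */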
+        if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+                q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+        else
+                rq = NULL;
+
+        return rq;
 }
 
-static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
-                                     int sectors, int queue_locked)
+int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-        if (q->ordered != QUEUE_ORDERED_FLUSH)
-                return 0;
-        if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
-                return 0;
-        if (blk_barrier_postflush(rq))
-                return 0;
+        struct request *rq = *rqp, *allowed_rq;
+        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
-        if (!blk_check_end_barrier(q, rq, sectors)) {
-                unsigned long flags = 0;
+        if (!q->ordseq) {
+                if (!is_barrier)
+                        return 1;
 
-                if (!queue_locked)
-                        spin_lock_irqsave(q->queue_lock, flags);
+                if (q->next_ordered != QUEUE_ORDERED_NONE) {
+                        *rqp = start_ordered(q, rq);
+                        return 1;
+                } else {
+                        /*
+                         * This can happen when the queue switches to
+                         * ORDERED_NONE while this request is on it.
+                         */
+                        blkdev_dequeue_request(rq);
+                        end_that_request_first(rq, -EOPNOTSUPP,
+                                               rq->hard_nr_sectors);
+                        end_that_request_last(rq, -EOPNOTSUPP);
+                        *rqp = NULL;
+                        return 0;
+                }
+        }
 
-                blk_start_post_flush(q, rq);
+        if (q->ordered & QUEUE_ORDERED_TAG) {
+                if (is_barrier && rq != &q->bar_rq)
+                        *rqp = NULL;
+                return 1;
+        }
 
-                if (!queue_locked)
- spin_unlock_irqrestore(q->queue_lock, flags);
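+        /*
+         * An ordered sequence is in progress: only the request that
+         * implements the current step may be issued.  Any other fs
+         * request, and any flush request that is out of turn, is held
+         * back by clearing *rqp.
+         */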
+        switch (blk_ordered_cur_seq(q)) {
+        case QUEUE_ORDSEQ_PREFLUSH:
+                allowed_rq = &q->pre_flush_rq;
+                break;
+        case QUEUE_ORDSEQ_BAR:
+                allowed_rq = &q->bar_rq;
+                break;
+        case QUEUE_ORDSEQ_POSTFLUSH:
+                allowed_rq = &q->post_flush_rq;
+                break;
+        default:
+                allowed_rq = NULL;
+                break;
         }
 
+        if (rq != allowed_rq &&
+            (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
+             rq == &q->post_flush_rq))
+                *rqp = NULL;
+
         return 1;
 }
 
-/**
- * blk_complete_barrier_rq - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   Used in driver end_io handling to determine whether to postpone
- *   completion of a barrier request until a post flush has been done. This
- *   is the unlocked variant, used if the caller doesn't already hold the
- *   queue lock.
- **/
-int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
+static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 {
-        return __blk_complete_barrier_rq(q, rq, sectors, 0);
+        request_queue_t *q = bio->bi_private;
+        struct bio_vec *bvec;
+        int i;
+
+        /*
+         * This is dry run, restore bio_sector and size. We'll finish
+         * this request again with the original bi_end_io after an
+         * error occurs or post flush is complete.
+         */
+        q->bi_size += bytes;
+
+        if (bio->bi_size)
+                return 1;
+
+        /* Rewind bvec's */
+        bio->bi_idx = 0;
+        bio_for_each_segment(bvec, bio, i) {
+                bvec->bv_len += bvec->bv_offset;
+                bvec->bv_offset = 0;
+        }
+
+        /* Reset bio */
+        set_bit(BIO_UPTODATE, &bio->bi_flags);
+        bio->bi_size = q->bi_size;
+        bio->bi_sector -= (q->bi_size >> 9);
+        q->bi_size = 0;
+
+        return 0;
 }
-EXPORT_SYMBOL(blk_complete_barrier_rq);
 
-/**
- * blk_complete_barrier_rq_locked - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   See blk_complete_barrier_rq(). This variant must be used if the caller
- *   holds the queue lock.
- **/
-int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
-                                   int sectors)
+static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
+                                    unsigned int nbytes, int error)
 {
-        return __blk_complete_barrier_rq(q, rq, sectors, 1);
+        request_queue_t *q = rq->q;
+        bio_end_io_t *endio;
+        void *private;
+
+        if (&q->bar_rq != rq)
+                return 0;
+
+        /*
+         * Okay, this is the barrier request in progress, dry finish it.
+         */
+        if (error && !q->orderr)
+                q->orderr = error;
+
+        endio = bio->bi_end_io;
+        private = bio->bi_private;
+        bio->bi_end_io = flush_dry_bio_endio;
+        bio->bi_private = q;
+
+        bio_endio(bio, nbytes, error);
+
+        bio->bi_end_io = endio;
+        bio->bi_private = private;
+
+        return 1;
 }
-EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
 
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
@@ -1039,12 +1141,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static char *rq_flags[] = {
+static const char * const rq_flags[] = {
         "REQ_RW",
         "REQ_FAILFAST",
         "REQ_SORTED",
         "REQ_SOFTBARRIER",
         "REQ_HARDBARRIER",
+        "REQ_FUA",
         "REQ_CMD",
         "REQ_NOMERGE",
         "REQ_STARTED",
@@ -1064,6 +1167,7 @@ static char *rq_flags[] = {
         "REQ_PM_SUSPEND",
         "REQ_PM_RESUME",
         "REQ_PM_SHUTDOWN",
+        "REQ_ORDERED_COLOR",
 };
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1641,8 +1745,6 @@ void blk_cleanup_queue(request_queue_t * q)
         if (q->queue_tags)
                 __blk_queue_free_tags(q);
 
-        blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-
         kmem_cache_free(requestq_cachep, q);
 }
@@ -1667,8 +1769,6 @@ static int blk_init_free_list(request_queue_t *q)
         return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
         return blk_alloc_queue_node(gfp_mask, -1);
@@ -1908,40 +2008,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
         struct request *rq = NULL;
         struct request_list *rl = &q->rq;
-        struct io_context *ioc = current_io_context(GFP_ATOMIC);
-        int priv;
+        struct io_context *ioc = NULL;
+        int may_queue, priv;
 
-        if (rl->count[rw]+1 >= q->nr_requests) {
-                /*
-                 * The queue will fill after this allocation, so set it as
-                 * full, and mark this process as "batching". This process
-                 * will be allowed to complete a batch of requests, others
-                 * will be blocked.
-                 */
-                if (!blk_queue_full(q, rw)) {
-                        ioc_set_batching(q, ioc);
-                        blk_set_queue_full(q, rw);
-                }
-        }
+        may_queue = elv_may_queue(q, rw, bio);
+        if (may_queue == ELV_MQUEUE_NO)
+                goto rq_starved;
 
-        switch (elv_may_queue(q, rw, bio)) {
-                case ELV_MQUEUE_NO:
-                        goto rq_starved;
-                case ELV_MQUEUE_MAY:
-                        break;
-                case ELV_MQUEUE_MUST:
-                        goto get_rq;
-        }
-
-        if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-                /*
-                 * The queue is full and the allocating process is not a
-                 * "batcher", and not exempted by the IO scheduler
-                 */
-                goto out;
+        if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+                if (rl->count[rw]+1 >= q->nr_requests) {
+                        ioc = current_io_context(GFP_ATOMIC);
+                        /*
+                         * The queue will fill after this allocation, so set
+                         * it as full, and mark this process as "batching".
+                         * This process will be allowed to complete a batch of
+                         * requests, others will be blocked.
+                         */
+                        if (!blk_queue_full(q, rw)) {
+                                ioc_set_batching(q, ioc);
+                                blk_set_queue_full(q, rw);
+                        } else {
+                                if (may_queue != ELV_MQUEUE_MUST
+                                    && !ioc_batching(q, ioc)) {
+                                        /*
+                                         * The queue is full and the allocating
+                                         * process is not a "batcher", and not
+                                         * exempted by the IO scheduler
+                                         */
+                                        goto out;
+                                }
+                        }
+                }
+                set_queue_congested(q, rw);
         }
 
-get_rq:
         /*
          * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +2052,6 @@ get_rq:
 
         rl->count[rw]++;
         rl->starved[rw] = 0;
-        if (rl->count[rw] >= queue_congestion_on_threshold(q))
-                set_queue_congested(q, rw);
 
         priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
         if (priv)
@@ -1962,7 +2060,7 @@ get_rq:
         spin_unlock_irq(q->queue_lock);
 
         rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-        if (!rq) {
+        if (unlikely(!rq)) {
                 /*
                  * Allocation failed presumably due to memory. Undo anything
                  * we might have messed up.
@@ -1987,6 +2085,12 @@ rq_starved:
                 goto out;
         }
 
+        /*
+         * ioc may be NULL here, and ioc_batching will be false. That's
+         * OK, if the queue is under the request limit then requests need
+         * not count toward the nr_batch_requests limit. There will always
+         * be some limit enforced by BLK_BATCH_TIME.
+         */
         if (ioc_batching(q, ioc))
                 ioc->nr_batch_requests--;
@@ -2313,7 +2417,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
  */
 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
                            struct request *rq, int at_head,
-                           void (*done)(struct request *))
+                           rq_end_io_fn *done)
 {
         int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2517,7 +2621,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
  */
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
 {
         struct completion *waiting = rq->waiting;
@@ -2655,6 +2759,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+        req->flags |= REQ_CMD;
+
+        /*
+         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+         */
+        if (bio_rw_ahead(bio) || bio_failfast(bio))
+                req->flags |= REQ_FAILFAST;
+
+        /*
+         * REQ_BARRIER implies no merging, but lets make it explicit
+         */
+        if (unlikely(bio_barrier(bio)))
+                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+        req->errors = 0;
+        req->hard_sector = req->sector = bio->bi_sector;
+        req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+        req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+        req->nr_phys_segments = bio_phys_segments(req->q, bio);
+        req->nr_hw_segments = bio_hw_segments(req->q, bio);
+        req->buffer = bio_data(bio);    /* see ->buffer comment above */
+        req->waiting = NULL;
+        req->bio = req->biotail = bio;
+        req->ioprio = bio_prio(bio);
+        req->rq_disk = bio->bi_bdev->bd_disk;
+        req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
         struct request *req;
@@ -2680,7 +2814,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
         spin_lock_prefetch(q->queue_lock);
 
         barrier = bio_barrier(bio);
-        if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
+        if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
                 err = -EOPNOTSUPP;
                 goto end_io;
         }
@@ -2750,33 +2884,7 @@ get_rq:
          * We don't worry about that case for efficiency. It won't happen
          * often, and the elevators are able to handle it.
          */
-
-        req->flags |= REQ_CMD;
-
-        /*
-         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-         */
-        if (bio_rw_ahead(bio) || bio_failfast(bio))
-                req->flags |= REQ_FAILFAST;
-
-        /*
-         * REQ_BARRIER implies no merging, but lets make it explicit
-         */
-        if (unlikely(barrier))
-                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-        req->errors = 0;
-        req->hard_sector = req->sector = sector;
-        req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-        req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-        req->nr_phys_segments = bio_phys_segments(q, bio);
-        req->nr_hw_segments = bio_hw_segments(q, bio);
-        req->buffer = bio_data(bio);    /* see ->buffer comment above */
-        req->waiting = NULL;
-        req->bio = req->biotail = bio;
-        req->ioprio = prio;
-        req->rq_disk = bio->bi_bdev->bd_disk;
-        req->start_time = jiffies;
+        init_request_from_bio(req, bio);
 
         spin_lock_irq(q->queue_lock);
         if (elv_queue_empty(q))
@@ -3067,7 +3175,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
                 if (nr_bytes >= bio->bi_size) {
                         req->bio = bio->bi_next;
                         nbytes = bio->bi_size;
-                        bio_endio(bio, nbytes, error);
+                        if (!ordered_bio_endio(req, bio, nbytes, error))
+                                bio_endio(bio, nbytes, error);
                         next_idx = 0;
                         bio_nbytes = 0;
                 } else {
@@ -3122,7 +3231,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
          * if the request wasn't completed, update state
          */
         if (bio_nbytes) {
-                bio_endio(bio, bio_nbytes, error);
+                if (!ordered_bio_endio(req, bio, bio_nbytes, error))
+                        bio_endio(bio, bio_nbytes, error);
                 bio->bi_idx += next_idx;
                 bio_iovec(bio)->bv_offset += nr_bytes;
                 bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3179,9 +3289,17 @@ EXPORT_SYMBOL(end_that_request_chunk);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
 {
         struct gendisk *disk = req->rq_disk;
+        int error;
+
+        /*
+         * extend uptodate bool to allow < 0 value to be direct io error
+         */
+        error = 0;
+        if (end_io_error(uptodate))
+                error = !uptodate ? -EIO : uptodate;
 
         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
@@ -3196,7 +3314,7 @@ void end_that_request_last(struct request *req)
                 disk->in_flight--;
         }
         if (req->end_io)
-                req->end_io(req);
+                req->end_io(req, error);
         else
                 __blk_put_request(req->q, req);
 }
@@ -3208,7 +3326,7 @@ void end_request(struct request *req, int uptodate)
         if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
                 add_disk_randomness(req->rq_disk);
                 blkdev_dequeue_request(req);
-                end_that_request_last(req);
+                end_that_request_last(req, uptodate);
         }
 }