@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-int blk_queue_bio(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * any locks.
 	 */
 	if (attempt_plug_merge(current, q, bio))
-		goto out;
+		return;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -1312,8 +1312,6 @@ get_rq:
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
-out:
-	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
@@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 static inline void __generic_make_request(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t old_sector;
-	int ret, nr_sectors = bio_sectors(bio);
-	dev_t old_dev;
+	int nr_sectors = bio_sectors(bio);
 	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
 
 	might_sleep();
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	/*
-	 * Resolve the mapping until finished. (drivers are
-	 * still free to implement/resolve their own stacking
-	 * by explicitly returning 0)
-	 *
-	 * NOTE: we don't repeat the blk_size check for each new device.
-	 * Stacking drivers are expected to know what they are doing.
-	 */
-	old_sector = -1;
-	old_dev = 0;
-	do {
-		char b[BDEVNAME_SIZE];
-		struct hd_struct *part;
-
-		q = bdev_get_queue(bio->bi_bdev);
-		if (unlikely(!q)) {
-			printk(KERN_ERR
-			       "generic_make_request: Trying to access "
-				"nonexistent block-device %s (%Lu)\n",
-				bdevname(bio->bi_bdev, b),
-				(long long) bio->bi_sector);
-			goto end_io;
-		}
-
-		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-			     nr_sectors > queue_max_hw_sectors(q))) {
-			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			       bdevname(bio->bi_bdev, b),
-			       bio_sectors(bio),
-			       queue_max_hw_sectors(q));
-			goto end_io;
-		}
-
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-			goto end_io;
-
-		part = bio->bi_bdev->bd_part;
-		if (should_fail_request(part, bio->bi_size) ||
-		    should_fail_request(&part_to_disk(part)->part0,
-					bio->bi_size))
-			goto end_io;
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+		       "nonexistent block-device %s (%Lu)\n",
+		       bdevname(bio->bi_bdev, b),
+		       (long long) bio->bi_sector);
+		goto end_io;
+	}
 
-		/*
-		 * If this device has partitions, remap block n
-		 * of partition p to block n+start(p) of the disk.
-		 */
-		blk_partition_remap(bio);
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
 
-		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-			goto end_io;
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		goto end_io;
 
-		if (old_sector != -1)
-			trace_block_bio_remap(q, bio, old_dev, old_sector);
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
 
-		old_sector = bio->bi_sector;
-		old_dev = bio->bi_bdev->bd_dev;
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
+	 */
+	blk_partition_remap(bio);
 
-		if (bio_check_eod(bio, nr_sectors))
-			goto end_io;
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
 
-		/*
-		 * Filter flush bio's early so that make_request based
-		 * drivers without flush support don't have to worry
-		 * about them.
-		 */
-		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-			if (!nr_sectors) {
-				err = 0;
-				goto end_io;
-			}
-		}
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
-		if ((bio->bi_rw & REQ_DISCARD) &&
-		    (!blk_queue_discard(q) ||
-		     ((bio->bi_rw & REQ_SECURE) &&
-		      !blk_queue_secdiscard(q)))) {
-			err = -EOPNOTSUPP;
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
+	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
 			goto end_io;
 		}
+	}
 
-		if (blk_throtl_bio(q, &bio))
-			goto end_io;
-
-		/*
-		 * If bio = NULL, bio has been throttled and will be submitted
-		 * later.
-		 */
-		if (!bio)
-			break;
-
-		trace_block_bio_queue(q, bio);
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) &&
+	      !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
 
-		ret = q->make_request_fn(q, bio);
-	} while (ret);
+	if (blk_throtl_bio(q, &bio))
+		goto end_io;
 
+	/* if bio = NULL, bio has been throttled and will be submitted later. */
+	if (!bio)
+		return;
+	trace_block_bio_queue(q, bio);
+	q->make_request_fn(q, bio);
 	return;
 
 end_io: