@@ -1105,6 +1105,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	const unsigned long do_discard = (bio->bi_rw
 					  & (REQ_DISCARD | REQ_SECURE));
+	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
@@ -1460,7 +1461,8 @@ retry_write:
 							      rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1504,8 @@ retry_write:
 						   r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -3569,6 +3572,8 @@ static int run(struct mddev *mddev)
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue,
+						 mddev->chunk_sectors);
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
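
For context on what the new bits do: the do_same flag propagates REQ_WRITE_SAME
from the incoming bio into each mirror's mbio->bi_rw, and
blk_queue_max_write_same_sectors() advertises the per-request WRITE SAME limit
on the md queue so the block layer will accept such bios at all. Below is a
minimal sketch of a kernel-internal caller exercising this path.
blkdev_issue_write_same() is the real block-layer helper from the same WRITE
SAME series; the wrapper function here is illustrative and not part of this
patch.

#include <linux/blkdev.h>
#include <linux/gfp.h>

/*
 * Illustrative only: replicate one page of data across a sector range of
 * an md/raid10 device using WRITE SAME.  blkdev_issue_write_same() builds
 * a bio with REQ_WRITE_SAME set; md's make_request() then ORs that bit
 * into each mirror's mbio->bi_rw via the do_same flag added above.
 */
static int example_write_same(struct block_device *bdev, struct page *page,
			      sector_t start, sector_t nr_sects)
{
	/*
	 * Typically fails with -EOPNOTSUPP when the queue's
	 * max_write_same_sectors limit is zero, which is why run()
	 * now calls blk_queue_max_write_same_sectors().
	 */
	return blkdev_issue_write_same(bdev, start, nr_sects,
				       GFP_KERNEL, page);
}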