@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
@@ -614,9 +615,20 @@ static inline bool blk_check_merge_flags(unsigned int flags1,
 	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
 		return false;
 
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
 	return true;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_data(a) == bio_data(b))
+		return true;
+
+	return false;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -818,6 +830,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	if (unlikely(cmd_flags & REQ_DISCARD))
 		return q->limits.max_discard_sectors;
 
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
 	return q->limits.max_sectors;
 }
 
@@ -886,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -1016,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1193,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_same_sectors;
+
+	return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
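
For context on how the pieces above fit together: a driver advertises WRITE SAME support by setting the new queue limit through blk_queue_max_write_same_sectors(), callers probe for it with bdev_write_same() (which returns 0 on devices without support), and blkdev_issue_write_same() builds and submits a bio whose single payload page the device replicates across the target range. A minimal caller sketch, assuming only the interfaces added above; the helper name write_same_fill_range, the ZERO_PAGE(0) payload, and the GFP_NOFS allocation context are illustrative, not part of this patch:

#include <linux/blkdev.h>
#include <linux/mm.h>

/*
 * Illustrative helper (not from this patch): replicate one page of
 * payload across nr_sects sectors starting at 'sector', bailing out
 * if the device does not support WRITE SAME.
 */
static int write_same_fill_range(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects)
{
	/* bdev_write_same() returns max_write_same_sectors, so a
	 * zero return means the device lacks WRITE SAME support. */
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* The page passed in is replicated by the device across the
	 * whole sector range; ZERO_PAGE(0) zero-fills it here. */
	return blkdev_issue_write_same(bdev, sector, nr_sects,
				       GFP_NOFS, ZERO_PAGE(0));
}

On the driver side, a low-level driver that can translate REQ_WRITE_SAME requests would opt in during queue setup with a call such as blk_queue_max_write_same_sectors(q, 0xffff); the actual limit is device-specific, and a queue that never sets it keeps max_write_same_sectors at 0, so blk_queue_get_max_sectors() rejects such requests outright.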