@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors;
-	unsigned int granularity, alignment, mask;
+	sector_t max_discard_sectors;
+	sector_t granularity, alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
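The declarations move to sector_t (which can be 64 bits wide even on 32-bit kernels), and the mask variable goes away: a bitmask models the remainder only when the granularity is a power of two, which discard_granularity need not be. A standalone sketch of that identity and where it breaks (not part of the patch, values made up):

/* Sketch: "x & (g - 1)" equals "x % g" only when g is a power of two. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t x = 1000;

	/* Power-of-two granularity: mask and modulo agree. */
	assert((x & (8 - 1)) == x % 8);

	/* Non-power-of-two granularity (e.g. 24 sectors): they differ. */
	printf("mask: %llu, mod: %llu\n",
	       (unsigned long long)(x & (24 - 1)),
	       (unsigned long long)(x % 24));	/* prints 0 vs. 16 */
	return 0;
}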
@@ -57,15 +57,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	mask = granularity - 1;
-	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+	alignment = bdev_discard_alignment(bdev) >> 9;
+	alignment = sector_div(alignment, granularity);
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors = round_down(max_discard_sectors, granularity);
+	sector_div(max_discard_sectors, granularity);
+	max_discard_sectors *= granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
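sector_div() divides its first argument in place and returns the remainder, and it works on 64-bit sector_t values even on 32-bit machines where a plain %/ on a 64-bit type is not usable in the kernel. So the first pair of lines leaves the device's discard alignment remainder in `alignment`, and the divide-then-multiply rounds max_discard_sectors down to a multiple of the granularity without assuming a power of two. A rough userspace model of those two steps; the helper name below is mine and only mimics the macro's behaviour for illustration:

/* Rough model of the two sector_div() uses above, with made-up numbers. */
#include <stdint.h>
#include <stdio.h>

static uint64_t sector_div_model(uint64_t *val, uint32_t div)
{
	uint64_t rem = *val % div;	/* remainder is returned ... */

	*val /= div;			/* ... and the value is divided in place */
	return rem;
}

int main(void)
{
	uint32_t granularity = 24;		/* sectors, not a power of two */
	uint64_t alignment = 100;		/* bdev_discard_alignment() >> 9 */
	uint64_t max_discard_sectors = 65535;

	/* alignment = sector_div(alignment, granularity); */
	alignment = sector_div_model(&alignment, granularity);

	/* sector_div(max_discard_sectors, granularity);
	 * max_discard_sectors *= granularity; */
	sector_div_model(&max_discard_sectors, granularity);
	max_discard_sectors *= granularity;

	printf("alignment=%llu max=%llu\n",
	       (unsigned long long)alignment,
	       (unsigned long long)max_discard_sectors);
	/* prints alignment=4 max=65520 with the numbers above */
	return 0;
}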
@@ -83,7 +84,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect;
+		sector_t end_sect, tmp;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -98,10 +99,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
 		end_sect = sector + req_sects;
-		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-			end_sect =
-				round_down(end_sect - alignment, granularity)
-				+ alignment;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
 			req_sects = end_sect - sector;
 		}
 
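The rewritten clamp stops a split discard at the previous sector whose offset within a granule matches the device's alignment: subtract the alignment, round down to a granularity multiple, add the alignment back. A standalone check of that arithmetic in plain C, with arbitrarily chosen, non-power-of-two values:

/* Standalone check of the end_sect clamping above, made-up numbers. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granularity = 24;	/* sectors per discard granule */
	uint64_t alignment = 4;		/* device offset within a granule */
	uint64_t sector = 100;		/* start of this discard request */
	uint64_t end_sect = 1000;	/* proposed (misaligned) end */

	if (end_sect % granularity != alignment) {
		/* Same arithmetic as the patch: round (end_sect - alignment)
		 * down to a multiple of granularity, then add alignment back. */
		end_sect = ((end_sect - alignment) / granularity)
			   * granularity + alignment;
	}

	printf("end_sect=%llu req_sects=%llu\n",
	       (unsigned long long)end_sect,
	       (unsigned long long)(end_sect - sector));
	/* prints end_sect=988 (988 % 24 == 4) req_sects=888 */
	return 0;
}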