Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--  block/blk-lib.c | 41 ++++++++++++++++++++++++++++-------------
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a7..19cc761cacb 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
+	unsigned int granularity, alignment, mask;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
+	/* Zero-sector (unknown) and one-sector granularities are the same.  */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	mask = granularity - 1;
+	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+
 	/*
 	 * Ensure that max_discard_sectors is of the proper
-	 * granularity
+	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = round_down(max_discard_sectors, granularity);
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
-	} else if (q->limits.discard_granularity) {
-		unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
-		max_discard_sectors &= ~(disc_sects - 1);
 	}
 
 	if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	bb.wait = &wait;
 
 	while (nr_sects) {
+		unsigned int req_sects;
+		sector_t end_sect;
+
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
 			break;
 		}
 
+		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+		/*
+		 * If splitting a request, and the next starting sector would be
+		 * misaligned, stop the discard at the previous aligned sector.
+		 */
+		end_sect = sector + req_sects;
+		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+			end_sect =
+				round_down(end_sect - alignment, granularity)
+				+ alignment;
+			req_sects = end_sect - sector;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		if (nr_sects > max_discard_sectors) {
-			bio->bi_size = max_discard_sectors << 9;
-			nr_sects -= max_discard_sectors;
-			sector += max_discard_sectors;
-		} else {
-			bio->bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
+		bio->bi_size = req_sects << 9;
+		nr_sects -= req_sects;
+		sector = end_sect;
 
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
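
For illustration, here is a minimal userspace sketch of the split arithmetic the
new code performs. It is not part of the patch: round_down() is reimplemented
locally, and the device parameters (8-sector granularity, 2-sector alignment
offset, 64-sector request cap) are made up. Like the kernel's mask arithmetic,
it assumes the granularity is a power of two.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's round_down(); y must be a power of two. */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

/*
 * Mirror of the patch's split computation: choose the end sector of one
 * discard request so that the next request starts at a sector congruent
 * to "alignment" modulo "granularity".
 */
static uint64_t split_end(uint64_t sector, uint64_t nr_sects,
			  unsigned int max_sects,
			  unsigned int granularity, unsigned int alignment)
{
	unsigned int mask = granularity - 1;
	uint64_t req_sects = nr_sects < max_sects ? nr_sects : max_sects;
	uint64_t end_sect = sector + req_sects;

	/* Only adjust when this is a split and the cut point is misaligned. */
	if (req_sects < nr_sects && (end_sect & mask) != alignment)
		end_sect = round_down(end_sect - alignment, granularity)
			   + alignment;
	return end_sect;
}

int main(void)
{
	/*
	 * Hypothetical device: discarding 100 sectors from sector 0 would
	 * naively split at sector 64, but 64 mod 8 == 0 != 2, so the cut
	 * moves back to round_down(64 - 2, 8) + 2 == 58.
	 */
	uint64_t sector = 0, nr_sects = 100;

	while (nr_sects) {
		uint64_t end = split_end(sector, nr_sects, 64, 8, 2);

		printf("discard [%llu, %llu)\n",
		       (unsigned long long)sector, (unsigned long long)end);
		nr_sects -= end - sector;
		sector = end;
	}
	return 0;
}

Run against those hypothetical numbers, this prints "discard [0, 58)" followed
by "discard [58, 100)": the first request is shortened from 64 to 58 sectors so
the second starts at a sector congruent to the alignment offset modulo the
granularity, which is what the (end_sect & mask) != alignment test in the patch
guarantees.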