Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	74
1 file changed, 22 insertions, 52 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67ffa391edc..66ceaff6455 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -86,12 +86,17 @@ struct dm_rq_target_io {
 };
 
 /*
- * For request-based dm.
- * One of these is allocated per bio.
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
  */
 struct dm_rq_clone_bio_info {
 	struct bio *orig;
 	struct dm_rq_target_io *tio;
+	struct bio clone;
 };
 
 union map_info *dm_get_mapinfo(struct bio *bio)
@@ -211,6 +216,11 @@ struct dm_md_mempools {
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
 static struct kmem_cache *_rq_tio_cache;
+
+/*
+ * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
+ * still used for _io_cache, I'm leaving this for a later cleanup
+ */
 static struct kmem_cache *_rq_bio_info_cache;
 
 static int __init local_init(void)
@@ -467,16 +477,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
 	mempool_free(tio, tio->md->tio_pool);
 }
 
-static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
-{
-	return mempool_alloc(md->io_pool, GFP_ATOMIC);
-}
-
-static void free_bio_info(struct dm_rq_clone_bio_info *info)
-{
-	mempool_free(info, info->tio->md->io_pool);
-}
-
 static int md_in_flight(struct mapped_device *md)
 {
 	return atomic_read(&md->pending[READ]) +
@@ -681,11 +681,6 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	/*
-	 * Store md for cleanup instead of tio which is about to get freed.
-	 */
-	bio->bi_private = md->bs;
-
 	free_tio(md, tio);
 	bio_put(bio);
 	dec_pending(io, error);
@@ -1036,11 +1031,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* error the io and bail out, or requeue it if needed */
 		md = tio->io->md;
 		dec_pending(tio->io, r);
-		/*
-		 * Store bio_set for cleanup.
-		 */
-		clone->bi_end_io = NULL;
-		clone->bi_private = md->bs;
 		bio_put(clone);
 		free_tio(md, tio);
 	} else if (r) {
@@ -1059,13 +1049,6 @@ struct clone_info {
 	unsigned short idx;
 };
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	struct bio_set *bs = bio->bi_private;
-
-	bio_free(bio, bs);
-}
-
 /*
  * Creates a little bio that just does part of a bvec.
  */
@@ -1077,7 +1060,6 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
-	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
 	clone->bi_sector = sector;
@@ -1090,7 +1072,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
@@ -1109,7 +1091,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
 	clone->bi_vcnt = idx + bv_count;
@@ -1117,7 +1098,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
@@ -1152,9 +1133,8 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
-	__bio_clone(clone, ci->bio);
-	clone->bi_destructor = dm_bio_destructor;
+	clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs);
+
 	if (len) {
 		clone->bi_sector = ci->sector;
 		clone->bi_size = to_bytes(len);
@@ -1484,30 +1464,17 @@ void dm_dispatch_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(dm_dispatch_request);
 
-static void dm_rq_bio_destructor(struct bio *bio)
-{
-	struct dm_rq_clone_bio_info *info = bio->bi_private;
-	struct mapped_device *md = info->tio->md;
-
-	free_bio_info(info);
-	bio_free(bio, md->bs);
-}
-
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 				 void *data)
 {
 	struct dm_rq_target_io *tio = data;
-	struct mapped_device *md = tio->md;
-	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
-
-	if (!info)
-		return -ENOMEM;
+	struct dm_rq_clone_bio_info *info =
+		container_of(bio, struct dm_rq_clone_bio_info, clone);
 
 	info->orig = bio_orig;
 	info->tio = tio;
 	bio->bi_end_io = end_clone_bio;
 	bio->bi_private = info;
-	bio->bi_destructor = dm_rq_bio_destructor;
 
 	return 0;
 }
@@ -2771,7 +2738,10 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools->tio_pool)
 		goto free_io_pool_and_out;
 
-	pools->bs = bioset_create(pool_size, 0);
+	pools->bs = (type == DM_TYPE_BIO_BASED) ?
+		bioset_create(pool_size, 0) :
+		bioset_create(pool_size,
+			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
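
For readers unfamiliar with the front_pad trick this patch relies on: the request-based bioset is now created with front_pad = offsetof(struct dm_rq_clone_bio_info, clone), so every clone bio handed out by bio_alloc_bioset() already sits at the tail of a dm_rq_clone_bio_info allocation, and dm_rq_bio_constructor() recovers the containing struct with container_of() instead of allocating it from a separate mempool and freeing it in a bi_destructor. The userspace sketch below illustrates the same layout idea only; it is not kernel code, and fake_bio, clone_info and demo_bioset_alloc are made-up stand-ins, not real APIs.

	/*
	 * Standalone sketch (not kernel code) of the front_pad / container_of
	 * pattern: per-clone bookkeeping lives in the same allocation as the
	 * bio itself, so no separate mempool or destructor is needed.
	 */
	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fake_bio {		/* stand-in for struct bio */
		int sector;
	};

	struct clone_info {		/* stand-in for dm_rq_clone_bio_info */
		void *orig;
		void *tio;
		struct fake_bio clone;	/* must be the last member: callers only
					 * ever see a pointer to this field */
	};

	/* Mimics bio_alloc_bioset() on a bioset created with
	 * front_pad = offsetof(struct clone_info, clone). */
	static struct fake_bio *demo_bioset_alloc(size_t front_pad)
	{
		char *p = calloc(1, front_pad + sizeof(struct fake_bio));

		return p ? (struct fake_bio *)(p + front_pad) : NULL;
	}

	int main(void)
	{
		size_t front_pad = offsetof(struct clone_info, clone);
		struct fake_bio *bio = demo_bioset_alloc(front_pad);
		struct clone_info *info;

		if (!bio)
			return 1;

		/* What dm_rq_bio_constructor() now does: recover the containing
		 * info struct from the bio pointer alone. */
		info = container_of(bio, struct clone_info, clone);
		info->orig = NULL;
		info->tio = NULL;

		printf("front_pad = %zu, info = %p, &info->clone = %p\n",
		       front_pad, (void *)info, (void *)&info->clone);

		free((char *)bio - front_pad);
		return 0;
	}

Because the allocation begins front_pad bytes before the bio that callers see, the embedded bio has to be the last member of the struct, which is exactly what the new comment above dm_rq_clone_bio_info spells out; bio-based devices keep a front_pad of 0 since they carry no such per-clone state.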