Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	117
1 file changed, 82 insertions(+), 35 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f5488..e76279e4116 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+		     struct scatterlist *sglist, struct bio_vec **bvprv,
+		     struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+	int nbytes = bvec->bv_len;
+
+	if (*bvprv && *cluster) {
+		if ((*sg)->length + nbytes > queue_max_segment_size(q))
+			goto new_segment;
+
+		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+			goto new_segment;
+		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+			goto new_segment;
+
+		(*sg)->length += nbytes;
+	} else {
+new_segment:
+		if (!*sg)
+			*sg = sglist;
+		else {
+			/*
+			 * If the driver previously mapped a shorter
+			 * list, we could see a termination bit
+			 * prematurely unless it fully inits the sg
+			 * table on each mapping. We KNOW that there
+			 * must be more entries here or the driver
+			 * would be buggy, so force clear the
+			 * termination bit to avoid doing a full
+			 * sg_init_table() in drivers for each command.
+			 */
+			(*sg)->page_link &= ~0x02;
+			*sg = sg_next(*sg);
+		}
+
+		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+		(*nsegs)++;
+	}
+	*bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	bvprv = NULL;
 	sg = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-		int nbytes = bvec->bv_len;
-
-		if (bvprv && cluster) {
-			if (sg->length + nbytes > queue_max_segment_size(q))
-				goto new_segment;
-
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-				goto new_segment;
-
-			sg->length += nbytes;
-		} else {
-new_segment:
-			if (!sg)
-				sg = sglist;
-			else {
-				/*
-				 * If the driver previously mapped a shorter
-				 * list, we could see a termination bit
-				 * prematurely unless it fully inits the sg
-				 * table on each mapping. We KNOW that there
-				 * must be more entries here or the driver
-				 * would be buggy, so force clear the
-				 * termination bit to avoid doing a full
-				 * sg_init_table() in drivers for each command.
-				 */
-				sg->page_link &= ~0x02;
-				sg = sg_next(sg);
-			}
-
-			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-			nsegs++;
-		}
-		bvprv = bvec;
+		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
 	} /* segments in rq */
@@ -199,6 +209,43 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+		   struct scatterlist *sglist)
+{
+	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *sg;
+	int nsegs, cluster;
+	unsigned long i;
+
+	nsegs = 0;
+	cluster = blk_queue_cluster(q);
+
+	bvprv = NULL;
+	sg = NULL;
+	bio_for_each_segment(bvec, bio, i) {
+		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
+	} /* segments in bio */
+
+	if (sg)
+		sg_mark_end(sg);
+
+	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+	return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
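For context, a minimal caller sketch of the new helper, not part of this commit: it assumes a bio-based driver that keeps a per-command sg table. The names my_setup_sg and MY_MAX_SEGS are hypothetical; only blk_bio_map_sg(), sg_init_table() and the bio/request_queue types come from the kernel.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS	128	/* assumed per-command sg table size */

static int my_setup_sg(struct request_queue *q, struct bio *bio,
		       struct scatterlist *sgl)
{
	int nsegs;

	/*
	 * One-time init of the table is enough per command here:
	 * blk_bio_map_sg() clears stale termination bits as it walks
	 * and calls sg_mark_end() on the last entry it fills.
	 */
	sg_init_table(sgl, MY_MAX_SEGS);

	/* sgl must hold at least bio->bi_phys_segments entries */
	nsegs = blk_bio_map_sg(q, bio, sgl);

	return nsegs;	/* number of sg entries set up */
}

Because blk_bio_map_sg() terminates the list itself via sg_mark_end(), the resulting scatterlist can be handed straight to the DMA layer (e.g. dma_map_sg()) without further fixup.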