| author | Martin K. Petersen <martin.petersen@oracle.com> | 2009-05-22 17:17:50 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-22 23:22:54 +0200 |
| commit | ae03bf639a5027d27270123f5f6e3ee6a412781d | |
| tree | d705f41a188ad656b1f47f7952626a9f992e3b8f /drivers/md | |
| parent | e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 | |
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
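The "wrapper functions" referred to above are block-layer accessors (queue_max_sectors(), queue_max_phys_segments(), and friends); their definitions are not part of this drivers/md diff. As a rough, hedged sketch of the pattern the patch relies on, they can be pictured as thin static inline reads of the corresponding struct request_queue limit fields; the authoritative definitions and exact return types live in the block layer headers and may differ:

```c
/*
 * Illustrative sketch only -- not taken from this patch. Assumes the
 * accessors are trivial static inline wrappers over the request_queue
 * limit fields; the real definitions are in the block layer headers.
 */
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->max_hw_sectors;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
	return q->max_phys_segments;
}
```

Going through an accessor instead of dereferencing q->max_sectors directly gives the block layer a single place to relocate or restructure the queue limits later without having to touch every driver again, which is what the conversion below prepares for.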
Diffstat (limited to 'drivers/md')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/md/dm-table.c | 28 |
| -rw-r--r-- | drivers/md/linear.c | 2 |
| -rw-r--r-- | drivers/md/multipath.c | 4 |
| -rw-r--r-- | drivers/md/raid0.c | 2 |
| -rw-r--r-- | drivers/md/raid1.c | 4 |
| -rw-r--r-- | drivers/md/raid10.c | 8 |
| -rw-r--r-- | drivers/md/raid5.c | 4 |
7 files changed, 26 insertions, 26 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 65e2d975985..e9a73bb242b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 *        combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a..64f1f3e046e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe82..4ee31aa13c4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * merge_bvec_fn will be involved in multipath.)
 			 */
 			if (q->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(q) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be5..925507e7d67 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 		 */
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde..e23758b4a34 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44..750550c1166 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e7..7970dc8c522 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)