Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--	drivers/md/dm-table.c	61
1 file changed, 57 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aa..100368eb799 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+			sector_t start, sector_t len, void *data)
+{
+	unsigned *num_devices = data;
+
+	(*num_devices)++;
+
+	return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+	struct dm_target *uninitialized_var(ti);
+	unsigned i = 0, num_devices = 0;
+
+	while (i < dm_table_get_num_targets(table)) {
+		ti = dm_table_get_target(table, i++);
+
+		if (!ti->type->iterate_devices)
+			return false;
+
+		ti->type->iterate_devices(ti, count_device, &num_devices);
+		if (num_devices)
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+			     sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	dm_table_set_integrity(t);
 
 	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool. Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios
 	 * are processed by request-based dm, which refers to the queue
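
Both additions lean on each target's iterate_devices hook: dm_table_has_no_data_devices() drives it with the count_device() callout, while dm_table_all_devices_attribute() drives it with an arbitrary predicate such as device_is_nonrot() or device_is_not_random(). For orientation, a minimal sketch of how a single-device target typically implements the hook follows; example_iterate_devices and struct example_ctx are hypothetical names invented here, not part of this commit:

struct example_ctx {
	struct dm_dev *dev;	/* the single device this target maps to */
};

/*
 * Sketch of a typical single-device iterate_devices implementation
 * (hypothetical target): the callout is forwarded to the one
 * underlying device, so count_device() or a predicate callout runs
 * once per device in the table.
 */
static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_ctx *ec = ti->private;

	return fn(ti, ec->dev, 0, ti->len, data);
}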
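
Note that dm_table_has_no_data_devices() is deliberately conservative: if even one target lacks an iterate_devices method the answer is unknown, so the function reports false. Below is a hedged sketch of the kind of check a caller could build on top of it; check_swap_allowed(), its arguments, and the -EINVAL policy are assumptions for illustration, not this commit's actual user:

/*
 * Sketch (hypothetical caller, not from this commit): a live table
 * with no data devices inherited its queue limits, so only accept a
 * replacement table of identical size.
 */
static int check_swap_allowed(struct dm_table *live_map,
			      struct dm_table *new_map)
{
	if (live_map && dm_table_has_no_data_devices(live_map) &&
	    dm_table_get_size(new_map) != dm_table_get_size(live_map))
		return -EINVAL;

	return 0;
}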
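
Generalizing dm_table_is_nonrot() into dm_table_all_devices_attribute() means each further table-wide capability check only costs a new callout with the iterate_devices_callout_fn signature, as device_is_not_random() already demonstrates. As one more hedged illustration, a made-up callout (device_is_readonly() is invented for this example) would slot in the same way:

/* Hypothetical callout: nonzero when the underlying device is read-only. */
static int device_is_readonly(struct dm_target *ti, struct dm_dev *dev,
			      sector_t start, sector_t len, void *data)
{
	return bdev_read_only(dev->bdev);
}

A caller would then test dm_table_all_devices_attribute(t, device_is_readonly), which returns true only when every target supports iterate_devices and reports the attribute for its devices.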