diff options
Diffstat (limited to 'drivers/md/dm.c')
| -rw-r--r-- | drivers/md/dm.c | 71 | 
1 file changed, 52 insertions, 19 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4e09b6ff5b4..67ffa391edc 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)  {  	int r = error;  	struct dm_rq_target_io *tio = clone->end_io_data; -	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; +	dm_request_endio_fn rq_end_io = NULL; -	if (mapped && rq_end_io) -		r = rq_end_io(tio->ti, clone, error, &tio->info); +	if (tio->ti) { +		rq_end_io = tio->ti->type->rq_end_io; + +		if (mapped && rq_end_io) +			r = rq_end_io(tio->ti, clone, error, &tio->info); +	}  	if (r <= 0)  		/* The target wants to complete the I/O */ @@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,  	int r, requeued = 0;  	struct dm_rq_target_io *tio = clone->end_io_data; -	/* -	 * Hold the md reference here for the in-flight I/O. -	 * We can't rely on the reference count by device opener, -	 * because the device may be closed during the request completion -	 * when all bios are completed. -	 * See the comment in rq_completed() too. -	 */ -	dm_get(md); -  	tio->ti = ti;  	r = ti->type->map_rq(ti, clone, &tio->info);  	switch (r) { @@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,  	return requeued;  } +static struct request *dm_start_request(struct mapped_device *md, struct request *orig) +{ +	struct request *clone; + +	blk_start_request(orig); +	clone = orig->special; +	atomic_inc(&md->pending[rq_data_dir(clone)]); + +	/* +	 * Hold the md reference here for the in-flight I/O. +	 * We can't rely on the reference count by device opener, +	 * because the device may be closed during the request completion +	 * when all bios are completed. +	 * See the comment in rq_completed() too. +	 */ +	dm_get(md); + +	return clone; +} +  /*   * q->request_fn for request-based dm.   * Called with the queue lock held. 
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)  			pos = blk_rq_pos(rq);  		ti = dm_table_find_target(map, pos); -		BUG_ON(!dm_target_is_valid(ti)); +		if (!dm_target_is_valid(ti)) { +			/* +			 * Must perform setup, that dm_done() requires, +			 * before calling dm_kill_unmapped_request +			 */ +			DMERR_LIMIT("request attempted access beyond the end of device"); +			clone = dm_start_request(md, rq); +			dm_kill_unmapped_request(clone, -EIO); +			continue; +		}  		if (ti->type->busy && ti->type->busy(ti))  			goto delay_and_out; -		blk_start_request(rq); -		clone = rq->special; -		atomic_inc(&md->pending[rq_data_dir(clone)]); +		clone = dm_start_request(md, rq);  		spin_unlock(q->queue_lock);  		if (map_request(ti, clone, md)) @@ -1684,8 +1706,6 @@ delay_and_out:  	blk_delay_queue(q, HZ / 10);  out:  	dm_table_put(map); - -	return;  }  int dm_underlying_device_busy(struct request_queue *q) @@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)   */  struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)  { -	struct dm_table *map = ERR_PTR(-EINVAL); +	struct dm_table *live_map, *map = ERR_PTR(-EINVAL);  	struct queue_limits limits;  	int r; @@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)  	if (!dm_suspended_md(md))  		goto out; +	/* +	 * If the new table has no data devices, retain the existing limits. +	 * This helps multipath with queue_if_no_path if all paths disappear, +	 * then new I/O is queued based on these limits, and then some paths +	 * reappear. +	 */ +	if (dm_table_has_no_data_devices(table)) { +		live_map = dm_get_live_table(md); +		if (live_map) +			limits = md->queue->limits; +		dm_table_put(live_map); +	} +  	r = dm_calculate_queue_limits(table, &limits);  	if (r) {  		map = ERR_PTR(r);  |