| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-06-13 10:13:29 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-06-13 10:13:29 -0700 |
| commit | 82ea4be61f45d1a7ba1f5151d90124d1ece0c07a (patch) | |
| tree | 2d2e89a3074d8385478ca5c4b5918f3552d3c25e /drivers/md/raid10.c | |
| parent | b844db31874e3b1c3b86c65024ac7bed9f77ee42 (diff) | |
| parent | 5026d7a9b2f3eb1f9bda66c18ac6bc3036ec9020 (diff) | |
Merge tag 'md-3.10-fixes' of git://neil.brown.name/md
Pull md bugfixes from Neil Brown:
 "A few bugfixes for md
  Some tagged for -stable"
* tag 'md-3.10-fixes' of git://neil.brown.name/md:
  md/raid1,5,10: Disable WRITE SAME until a recovery strategy is in place
  md/raid1,raid10: use freeze_array in place of raise_barrier in various places.
  md/raid1: consider WRITE as successful only if at least one non-Faulty and non-rebuilding drive completed it.
  md: md_stop_writes() should always freeze recovery.
Diffstat (limited to 'drivers/md/raid10.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/md/raid10.c | 29 |

1 file changed, 19 insertions, 10 deletions
```diff
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c..6ddae2501b9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
 		sector_t first_bad;
 		int bad_sectors;
 
-		set_bit(R10BIO_Uptodate, &r10_bio->state);
+		/*
+		 * Do not set R10BIO_Uptodate if the current device is
+		 * rebuilding or Faulty. This is because we cannot use
+		 * such device for properly reading the data back (we could
+		 * potentially use it, if the current write would have fallen
+		 * before rdev->recovery_offset, but for simplicity we don't
+		 * check this here.)
+		 */
+		if (test_bit(In_sync, &rdev->flags) &&
+		    !test_bit(Faulty, &rdev->flags))
+			set_bit(R10BIO_Uptodate, &r10_bio->state);
 
 		/* Maybe we can clear some bad blocks. */
 		if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-static void freeze_array(struct r10conf *conf)
+static void freeze_array(struct r10conf *conf, int extra)
 {
 	/* stop syncio and normal IO and wait for everything to
 	 * go quiet.
 	 * We increment barrier and nr_waiting, and then
-	 * wait until nr_pending match nr_queued+1
+	 * wait until nr_pending match nr_queued+extra
 	 * This is called in the context of one normal IO request
 	 * that has failed. Thus any sync request that might be pending
 	 * will be blocked by nr_pending, and we need to wait for
 	 * pending IO requests to complete or be queued for re-try.
-	 * Thus the number queued (nr_queued) plus this request (1)
+	 * Thus the number queued (nr_queued) plus this request (extra)
 	 * must match the number of pending IOs (nr_pending) before
 	 * we continue.
 	 */
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
 	conf->barrier++;
 	conf->nr_waiting++;
 	wait_event_lock_irq_cmd(conf->wait_barrier,
-				conf->nr_pending == conf->nr_queued+1,
+				conf->nr_pending == conf->nr_queued+extra,
 				conf->resync_lock,
 				flush_pending_writes(conf));
 
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		 * we wait for all outstanding requests to complete.
 		 */
 		synchronize_sched();
-		raise_barrier(conf, 0);
-		lower_barrier(conf);
+		freeze_array(conf, 0);
+		unfreeze_array(conf);
 		clear_bit(Unmerged, &rdev->flags);
 	}
 	md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 	r10_bio->devs[slot].bio = NULL;
 
 	if (mddev->ro == 0) {
-		freeze_array(conf);
+		freeze_array(conf, 1);
 		fix_read_error(conf, mddev, r10_bio);
 		unfreeze_array(conf);
 	} else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
-		blk_queue_max_write_same_sectors(mddev->queue,
-						 mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
```
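The freeze_array() change above is the heart of the barrier fix: the new `extra` argument tells the barrier how many of the pending requests the caller itself is still holding. Below is a minimal userspace sketch of that counting invariant, assuming a plain pthread mutex/condvar in place of the kernel's resync_lock and wait_event_lock_irq_cmd(); it omits the flush_pending_writes() call the real code issues while waiting, and every `_model` name here is illustrative, not md code.

```c
/*
 * Userspace model (an assumption for illustration, not md code) of the
 * freeze_array() counting invariant: the array is quiesced once every
 * pending I/O is either parked for retry (nr_queued) or is one of the
 * "extra" requests the freezing caller itself holds.
 */
#include <pthread.h>
#include <stdio.h>

struct conf {
	pthread_mutex_t lock;          /* stands in for resync_lock */
	pthread_cond_t  wait_barrier;
	int barrier;                   /* blocks new I/O while non-zero */
	int nr_waiting;
	int nr_pending;                /* I/O submitted but not completed */
	int nr_queued;                 /* failed I/O parked for retry */
};

static void freeze_array_model(struct conf *conf, int extra)
{
	pthread_mutex_lock(&conf->lock);
	conf->barrier++;
	conf->nr_waiting++;
	/* Quiesced once pending I/O = queued retries + caller's own requests. */
	while (conf->nr_pending != conf->nr_queued + extra)
		pthread_cond_wait(&conf->wait_barrier, &conf->lock);
	pthread_mutex_unlock(&conf->lock);
}

static void unfreeze_array_model(struct conf *conf)
{
	/* Reverse the effect of the freeze and wake anyone throttled on it. */
	pthread_mutex_lock(&conf->lock);
	conf->barrier--;
	conf->nr_waiting--;
	pthread_cond_broadcast(&conf->wait_barrier);
	pthread_mutex_unlock(&conf->lock);
}

int main(void)
{
	struct conf conf = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_barrier = PTHREAD_COND_INITIALIZER,
	};

	/* handle_read_error() is itself one of the pending requests,
	 * so it freezes with extra == 1, as in freeze_array(conf, 1). */
	conf.nr_pending = 1;
	freeze_array_model(&conf, 1);
	puts("frozen with extra=1: nr_pending == nr_queued + 1");
	unfreeze_array_model(&conf);

	/* raid10_add_disk() holds no request of its own, so it passes
	 * extra == 0 and requires all I/O to drain first. */
	conf.nr_pending = 0;
	freeze_array_model(&conf, 0);
	puts("frozen with extra=0: nr_pending == nr_queued");
	unfreeze_array_model(&conf);
	return 0;
}
```

Read against this model, the two call-site changes in the diff line up: handle_read_error() holds the failed request it is recovering, hence extra == 1, while raid10_add_disk() holds none, hence extra == 0. Unlike the raise_barrier()/lower_barrier() pair it replaces there, freeze_array() also accounts for requests parked on nr_queued and flushes pending writes while waiting, which is what makes it safe to call from the failure-handling paths.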