| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 09:42:05 -0800 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 09:42:05 -0800 |
| commit    | a22180d2666c018f4fef6818074d78bb76ff2bda (patch) | |
| tree      | a633aaf423ff39f94d00502d03dbbd99dab4b2ee /fs/btrfs/extent_io.c | |
| parent    | 2d4dce0070448bcb5ccd04553a4be4635417f565 (diff) | |
| parent    | 213490b301773ea9c6fb89a86424a6901fcdd069 (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs update from Chris Mason:
 "A big set of fixes and features.
  In terms of line count, most of the code comes from Stefan, who added
  the ability to replace a single drive in place.  This is different
  from how btrfs normally replaces drives, and is much much much faster.
  Josef is plowing through our synchronous write performance.  This pull
  request does not include the DIO_OWN_WAITING patch that was discussed
  on the list, but it has a number of other improvements to cut down our
  latencies and CPU time during fsync/O_DIRECT writes.
  Miao Xie has a big series of fixes and is spreading out ordered
  operations over more CPUs.  This improves performance and reduces
  contention.
  I've put in fixes for error handling around hash collisions.  These
  are going back to individual stable kernels as I test against them.
  Otherwise we have a lot of fixes and cleanups, thanks everyone!
  raid5/6 is being rebased against the device replacement code.  I'll
  have it posted this Friday along with a nice series of benchmarks."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (115 commits)
  Btrfs: fix a bug of per-file nocow
  Btrfs: fix hash overflow handling
  Btrfs: don't take inode delalloc mutex if we're a free space inode
  Btrfs: fix autodefrag and umount lockup
  Btrfs: fix permissions of empty files not affected by umask
  Btrfs: put raid properties into global table
  Btrfs: fix BUG() in scrub when first superblock reading gives EIO
  Btrfs: do not call file_update_time in aio_write
  Btrfs: only unlock and relock if we have to
  Btrfs: use tokens where we can in the tree log
  Btrfs: optimize leaf_space_used
  Btrfs: don't memset new tokens
  Btrfs: only clear dirty on the buffer if it is marked as dirty
  Btrfs: move checks in set_page_dirty under DEBUG
  Btrfs: log changed inodes based on the extent map tree
  Btrfs: add path->really_keep_locks
  Btrfs: do not mark ems as prealloc if we are writing to them
  Btrfs: keep track of the extents original block length
  Btrfs: inline csums if we're fsyncing
  Btrfs: don't bother copying if we're only logging the inode
  ...
Diffstat (limited to 'fs/btrfs/extent_io.c')
| -rw-r--r-- | fs/btrfs/extent_io.c | 37 |
1 file changed, 14 insertions, 23 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 472873a94d9..1b319df29ee 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -341,12 +341,10 @@ static int insert_state(struct extent_io_tree *tree,
 {
 	struct rb_node *node;
 
-	if (end < start) {
-		printk(KERN_ERR "btrfs end < start %llu %llu\n",
+	if (end < start)
+		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
 		       (unsigned long long)end,
 		       (unsigned long long)start);
-		WARN_ON(1);
-	}
 	state->start = start;
 	state->end = end;
 
@@ -1919,12 +1917,12 @@ static void repair_io_failure_callback(struct bio *bio, int err)
  * the standard behavior is to write all copies in a raid setup. here we only
  * want to write the one bad copy. so we do the mapping for ourselves and issue
  * submit_bio directly.
- * to avoid any synchonization issues, wait for the data after writing, which
+ * to avoid any synchronization issues, wait for the data after writing, which
  * actually prevents the read that triggered the error from finishing.
  * currently, there can be no more than two copies of every data bit. thus,
  * exactly one rewrite is required.
  */
-int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 			u64 length, u64 logical, struct page *page,
 			int mirror_num)
 {
@@ -1946,7 +1944,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 	bio->bi_size = 0;
 	map_length = length;
 
-	ret = btrfs_map_block(map_tree, WRITE, logical,
+	ret = btrfs_map_block(fs_info, WRITE, logical,
 			      &map_length, &bbio, mirror_num);
 	if (ret) {
 		bio_put(bio);
@@ -1984,14 +1982,13 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
 			 int mirror_num)
 {
-	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
 	u64 start = eb->start;
 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
 	int ret = 0;
 
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = extent_buffer_page(eb, i);
-		ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
+		ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
 					start, p, mirror_num);
 		if (ret)
 			break;
@@ -2010,7 +2007,7 @@ static int clean_io_failure(u64 start, struct page *page)
 	u64 private;
 	u64 private_failure;
 	struct io_failure_record *failrec;
-	struct btrfs_mapping_tree *map_tree;
+	struct btrfs_fs_info *fs_info;
 	struct extent_state *state;
 	int num_copies;
 	int did_repair = 0;
@@ -2046,11 +2043,11 @@ static int clean_io_failure(u64 start, struct page *page)
 	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
 
 	if (state && state->start == failrec->start) {
-		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
-		num_copies = btrfs_num_copies(map_tree, failrec->logical,
-						failrec->len);
+		fs_info = BTRFS_I(inode)->root->fs_info;
+		num_copies = btrfs_num_copies(fs_info, failrec->logical,
+					      failrec->len);
 		if (num_copies > 1)  {
-			ret = repair_io_failure(map_tree, start, failrec->len,
+			ret = repair_io_failure(fs_info, start, failrec->len,
 						failrec->logical, page,
 						failrec->failed_mirror);
 			did_repair = !ret;
@@ -2159,9 +2156,8 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
 		 * clean_io_failure() clean all those errors at once.
 		 */
 	}
-	num_copies = btrfs_num_copies(
-			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
-			      failrec->logical, failrec->len);
+	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
+				      failrec->logical, failrec->len);
 	if (num_copies == 1) {
 		/*
 		 * we only have a single copy of the data, so don't bother with
@@ -2466,10 +2462,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 	return bio;
 }
 
-/*
- * Since writes are async, they will only return -ENOMEM.
- * Reads can return the full range of I/O error conditions.
- */
 static int __must_check submit_one_bio(int rw, struct bio *bio,
 				       int mirror_num, unsigned long bio_flags)
 {
@@ -4721,10 +4713,9 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	}
 
 	if (start + min_len > eb->len) {
-		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
+		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
 		       "wanted %lu %lu\n", (unsigned long long)eb->start,
 		       eb->len, start, min_len);
-		WARN_ON(1);
 		return -EINVAL;
 	}
 
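Two patterns recur in the hunks above: callers now pass struct btrfs_fs_info directly to btrfs_map_block(), btrfs_num_copies() and repair_io_failure() instead of a struct btrfs_mapping_tree pointer, and the open-coded printk(KERN_ERR ...) + WARN_ON(1) pairs are collapsed into a single WARN(cond, fmt, ...) call. The sketch below is a minimal, self-contained userspace illustration of the second pattern only; the WARN() stub is not the kernel's implementation (the real macro also dumps a backtrace and taints the kernel), and insert_range() is a hypothetical stand-in for insert_state().

```c
/*
 * Userspace sketch (not kernel code) of folding a separate
 * printk() + WARN_ON(1) pair into one WARN(cond, fmt, ...) call
 * that only prints when the condition is true and evaluates to
 * that condition, so it can sit directly inside an if ().
 */
#include <stdio.h>
#include <stdint.h>

#define WARN(cond, fmt, ...)						\
	({								\
		int __hit = !!(cond);					\
		if (__hit)						\
			fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__); \
		__hit;							\
	})

/* Hypothetical stand-in for insert_state(): reject inverted ranges. */
static int insert_range(uint64_t start, uint64_t end)
{
	/* One call both tests the condition and reports it. */
	if (WARN(end < start, "end < start %llu %llu\n",
		 (unsigned long long)end, (unsigned long long)start))
		return -1;

	printf("inserting [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

int main(void)
{
	insert_range(100, 200);	/* valid range, no warning */
	insert_range(200, 100);	/* inverted range, triggers WARN() */
	return 0;
}
```

Built with gcc or clang (the statement-expression and ##__VA_ARGS__ forms are GNU extensions, as in the kernel), the second call prints the warning while the first stays silent, mirroring how the patch lets one WARN() replace the old message-plus-WARN_ON(1) block.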