diff options
Diffstat (limited to 'fs/btrfs')
37 files changed, 1972 insertions, 1017 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 0cc20b35c1c..42704149b72 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -171,11 +171,11 @@ out:  	spin_unlock_irqrestore(&workers->lock, flags);  } -static noinline int run_ordered_completions(struct btrfs_workers *workers, +static noinline void run_ordered_completions(struct btrfs_workers *workers,  					    struct btrfs_work *work)  {  	if (!workers->ordered) -		return 0; +		return;  	set_bit(WORK_DONE_BIT, &work->flags); @@ -213,7 +213,6 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,  	}  	spin_unlock(&workers->order_lock); -	return 0;  }  static void put_worker(struct btrfs_worker_thread *worker) @@ -399,7 +398,7 @@ again:  /*   * this will wait for all the worker threads to shutdown   */ -int btrfs_stop_workers(struct btrfs_workers *workers) +void btrfs_stop_workers(struct btrfs_workers *workers)  {  	struct list_head *cur;  	struct btrfs_worker_thread *worker; @@ -427,7 +426,6 @@ int btrfs_stop_workers(struct btrfs_workers *workers)  		put_worker(worker);  	}  	spin_unlock_irq(&workers->lock); -	return 0;  }  /* @@ -615,14 +613,14 @@ found:   * it was taken from.  It is intended for use with long running work functions   * that make some progress and want to give the cpu up for others.   
*/ -int btrfs_requeue_work(struct btrfs_work *work) +void btrfs_requeue_work(struct btrfs_work *work)  {  	struct btrfs_worker_thread *worker = work->worker;  	unsigned long flags;  	int wake = 0;  	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) -		goto out; +		return;  	spin_lock_irqsave(&worker->lock, flags);  	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) @@ -649,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work *work)  	if (wake)  		wake_up_process(worker->task);  	spin_unlock_irqrestore(&worker->lock, flags); -out: - -	return 0;  }  void btrfs_set_work_high_prio(struct btrfs_work *work) diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index f34cc31fa3c..063698b90ce 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -111,9 +111,9 @@ struct btrfs_workers {  void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);  int btrfs_start_workers(struct btrfs_workers *workers); -int btrfs_stop_workers(struct btrfs_workers *workers); +void btrfs_stop_workers(struct btrfs_workers *workers);  void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,  			struct btrfs_workers *async_starter); -int btrfs_requeue_work(struct btrfs_work *work); +void btrfs_requeue_work(struct btrfs_work *work);  void btrfs_set_work_high_prio(struct btrfs_work *work);  #endif diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d02c27cd14c..d11afa67c7d 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -226,8 +226,8 @@ out:   * Clear the writeback bits on all of the file   * pages for a compressed write   */ -static noinline int end_compressed_writeback(struct inode *inode, u64 start, -					     unsigned long ram_size) +static noinline void end_compressed_writeback(struct inode *inode, u64 start, +					      unsigned long ram_size)  {  	unsigned long index = start >> PAGE_CACHE_SHIFT;  	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT; @@ -253,7 +253,6 @@ static 
noinline int end_compressed_writeback(struct inode *inode, u64 start,  		index += ret;  	}  	/* the inode may be gone now */ -	return 0;  }  /* @@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,  			 */  			atomic_inc(&cb->pending_bios);  			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			if (!skip_sum) {  				ret = btrfs_csum_one_bio(root, inode, bio,  							 start, 1); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  			}  			ret = btrfs_map_bio(root, WRITE, bio, 0, 1); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			bio_put(bio); @@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,  	bio_get(bio);  	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	if (!skip_sum) {  		ret = btrfs_csum_one_bio(root, inode, bio, start, 1); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	ret = btrfs_map_bio(root, WRITE, bio, 0, 1); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	bio_put(bio);  	return 0; @@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,  		 * sure they map to this compressed extent on disk.  		 
*/  		set_page_extent_mapped(page); -		lock_extent(tree, last_offset, end, GFP_NOFS); +		lock_extent(tree, last_offset, end);  		read_lock(&em_tree->lock);  		em = lookup_extent_mapping(em_tree, last_offset,  					   PAGE_CACHE_SIZE); @@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,  		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||  		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {  			free_extent_map(em); -			unlock_extent(tree, last_offset, end, GFP_NOFS); +			unlock_extent(tree, last_offset, end);  			unlock_page(page);  			page_cache_release(page);  			break; @@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,  			nr_pages++;  			page_cache_release(page);  		} else { -			unlock_extent(tree, last_offset, end, GFP_NOFS); +			unlock_extent(tree, last_offset, end);  			unlock_page(page);  			page_cache_release(page);  			break; @@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,  			bio_get(comp_bio);  			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			/*  			 * inc the count before we submit the bio so @@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,  			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {  				ret = btrfs_lookup_bio_sums(root, inode,  							comp_bio, sums); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  			}  			sums += (comp_bio->bi_size + root->sectorsize - 1) /  				root->sectorsize;  			ret = btrfs_map_bio(root, READ, comp_bio,  					    mirror_num, 0); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			bio_put(comp_bio); @@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,  	bio_get(comp_bio);  	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {  		ret = 
btrfs_lookup_bio_sums(root, inode, comp_bio, sums); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	bio_put(comp_bio);  	return 0; @@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {  	&btrfs_lzo_compress,  }; -int __init btrfs_init_compress(void) +void __init btrfs_init_compress(void)  {  	int i; @@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)  		atomic_set(&comp_alloc_workspace[i], 0);  		init_waitqueue_head(&comp_workspace_wait[i]);  	} -	return 0;  }  /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index a12059f4f0f..9afb0a62ae8 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,7 +19,7 @@  #ifndef __BTRFS_COMPRESSION_  #define __BTRFS_COMPRESSION_ -int btrfs_init_compress(void); +void btrfs_init_compress(void);  void btrfs_exit_compress(void);  int btrfs_compress_pages(int type, struct address_space *mapping, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 270655da11d..e801f226d7e 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -36,7 +36,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,  			      struct btrfs_root *root,  			      struct extent_buffer *dst_buf,  			      struct extent_buffer *src_buf); -static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, +static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,  		   struct btrfs_path *path, int level, int slot);  struct btrfs_path *btrfs_alloc_path(void) @@ -344,8 +344,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,  	if (btrfs_block_can_be_shared(root, buf)) {  		ret = btrfs_lookup_extent_info(trans, root, buf->start,  					       buf->len, &refs, &flags); -		BUG_ON(ret); -		BUG_ON(refs == 0); +		if (ret) +			return ret; +		if (refs == 0) { +			ret = -EROFS; +			btrfs_std_error(root->fs_info, ret); +			return ret; +		}  	} else 
{  		refs = 1;  		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || @@ -364,14 +369,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,  		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&  		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {  			ret = btrfs_inc_ref(trans, root, buf, 1, 1); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			if (root->root_key.objectid ==  			    BTRFS_TREE_RELOC_OBJECTID) {  				ret = btrfs_dec_ref(trans, root, buf, 0, 1); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  				ret = btrfs_inc_ref(trans, root, cow, 1, 1); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  			}  			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;  		} else { @@ -381,14 +386,15 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,  				ret = btrfs_inc_ref(trans, root, cow, 1, 1);  			else  				ret = btrfs_inc_ref(trans, root, cow, 0, 1); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		}  		if (new_flags != 0) {  			ret = btrfs_set_disk_extent_flags(trans, root,  							  buf->start,  							  buf->len,  							  new_flags, 0); -			BUG_ON(ret); +			if (ret) +				return ret;  		}  	} else {  		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { @@ -397,9 +403,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,  				ret = btrfs_inc_ref(trans, root, cow, 1, 1);  			else  				ret = btrfs_inc_ref(trans, root, cow, 0, 1); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			ret = btrfs_dec_ref(trans, root, buf, 1, 1); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		}  		clean_tree_block(trans, root, buf);  		*last_ref = 1; @@ -428,7 +434,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,  {  	struct btrfs_disk_key disk_key;  	struct extent_buffer *cow; -	int level; +	int level, ret;  	int last_ref = 0;  	int unlock_orig = 0;  	u64 parent_start; @@ -480,7 +486,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,  			    
(unsigned long)btrfs_header_fsid(cow),  			    BTRFS_FSID_SIZE); -	update_ref_for_cow(trans, root, buf, cow, &last_ref); +	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		return ret; +	}  	if (root->ref_cows)  		btrfs_reloc_cow_block(trans, root, buf, cow); @@ -947,7 +957,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,  		/* promote the child to a root */  		child = read_node_slot(root, mid, 0); -		BUG_ON(!child); +		if (!child) { +			ret = -EROFS; +			btrfs_std_error(root->fs_info, ret); +			goto enospc; +		} +  		btrfs_tree_lock(child);  		btrfs_set_lock_blocking(child);  		ret = btrfs_cow_block(trans, root, child, mid, 0, &child); @@ -1023,10 +1038,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,  		if (btrfs_header_nritems(right) == 0) {  			clean_tree_block(trans, root, right);  			btrfs_tree_unlock(right); -			wret = del_ptr(trans, root, path, level + 1, pslot + -				       1); -			if (wret) -				ret = wret; +			del_ptr(trans, root, path, level + 1, pslot + 1);  			root_sub_used(root, right->len);  			btrfs_free_tree_block(trans, root, right, 0, 1, 0);  			free_extent_buffer_stale(right); @@ -1048,7 +1060,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,  		 * otherwise we would have pulled some pointers from the  		 * right  		 */ -		BUG_ON(!left); +		if (!left) { +			ret = -EROFS; +			btrfs_std_error(root->fs_info, ret); +			goto enospc; +		}  		wret = balance_node_right(trans, root, mid, left);  		if (wret < 0) {  			ret = wret; @@ -1064,9 +1080,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,  	if (btrfs_header_nritems(mid) == 0) {  		clean_tree_block(trans, root, mid);  		btrfs_tree_unlock(mid); -		wret = del_ptr(trans, root, path, level + 1, pslot); -		if (wret) -			ret = wret; +		del_ptr(trans, root, path, level + 1, pslot);  		root_sub_used(root, mid->len);  		
btrfs_free_tree_block(trans, root, mid, 0, 1, 0);  		free_extent_buffer_stale(mid); @@ -1905,15 +1919,12 @@ done:   * fixing up pointers when a given leaf/node is not in slot 0 of the   * higher levels   * - * If this fails to write a tree block, it returns -1, but continues - * fixing up the blocks in ram so the tree is consistent.   */ -static int fixup_low_keys(struct btrfs_trans_handle *trans, -			  struct btrfs_root *root, struct btrfs_path *path, -			  struct btrfs_disk_key *key, int level) +static void fixup_low_keys(struct btrfs_trans_handle *trans, +			   struct btrfs_root *root, struct btrfs_path *path, +			   struct btrfs_disk_key *key, int level)  {  	int i; -	int ret = 0;  	struct extent_buffer *t;  	for (i = level; i < BTRFS_MAX_LEVEL; i++) { @@ -1926,7 +1937,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,  		if (tslot != 0)  			break;  	} -	return ret;  }  /* @@ -1935,9 +1945,9 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,   * This function isn't completely safe. 
It's the caller's responsibility   * that the new key won't break the order   */ -int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, -			    struct btrfs_root *root, struct btrfs_path *path, -			    struct btrfs_key *new_key) +void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, +			     struct btrfs_root *root, struct btrfs_path *path, +			     struct btrfs_key *new_key)  {  	struct btrfs_disk_key disk_key;  	struct extent_buffer *eb; @@ -1947,13 +1957,11 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,  	slot = path->slots[0];  	if (slot > 0) {  		btrfs_item_key(eb, &disk_key, slot - 1); -		if (comp_keys(&disk_key, new_key) >= 0) -			return -1; +		BUG_ON(comp_keys(&disk_key, new_key) >= 0);  	}  	if (slot < btrfs_header_nritems(eb) - 1) {  		btrfs_item_key(eb, &disk_key, slot + 1); -		if (comp_keys(&disk_key, new_key) <= 0) -			return -1; +		BUG_ON(comp_keys(&disk_key, new_key) <= 0);  	}  	btrfs_cpu_key_to_disk(&disk_key, new_key); @@ -1961,7 +1969,6 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,  	btrfs_mark_buffer_dirty(eb);  	if (slot == 0)  		fixup_low_keys(trans, root, path, &disk_key, 1); -	return 0;  }  /* @@ -2164,12 +2171,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,   *   * slot and level indicate where you want the key to go, and   * blocknr is the block the key points to. 
- * - * returns zero on success and < 0 on any error   */ -static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root -		      *root, struct btrfs_path *path, struct btrfs_disk_key -		      *key, u64 bytenr, int slot, int level) +static void insert_ptr(struct btrfs_trans_handle *trans, +		       struct btrfs_root *root, struct btrfs_path *path, +		       struct btrfs_disk_key *key, u64 bytenr, +		       int slot, int level)  {  	struct extent_buffer *lower;  	int nritems; @@ -2179,8 +2185,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root  	lower = path->nodes[level];  	nritems = btrfs_header_nritems(lower);  	BUG_ON(slot > nritems); -	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root)) -		BUG(); +	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));  	if (slot != nritems) {  		memmove_extent_buffer(lower,  			      btrfs_node_key_ptr_offset(slot + 1), @@ -2193,7 +2198,6 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root  	btrfs_set_node_ptr_generation(lower, slot, trans->transid);  	btrfs_set_header_nritems(lower, nritems + 1);  	btrfs_mark_buffer_dirty(lower); -	return 0;  }  /* @@ -2214,7 +2218,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,  	struct btrfs_disk_key disk_key;  	int mid;  	int ret; -	int wret;  	u32 c_nritems;  	c = path->nodes[level]; @@ -2271,11 +2274,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,  	btrfs_mark_buffer_dirty(c);  	btrfs_mark_buffer_dirty(split); -	wret = insert_ptr(trans, root, path, &disk_key, split->start, -			  path->slots[level + 1] + 1, -			  level + 1); -	if (wret) -		ret = wret; +	insert_ptr(trans, root, path, &disk_key, split->start, +		   path->slots[level + 1] + 1, level + 1);  	if (path->slots[level] >= mid) {  		path->slots[level] -= mid; @@ -2564,7 +2564,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,  	u32 old_left_nritems;  	u32 nr;  	int ret = 0; -	int wret;  	u32 this_item_size;  	
u32 old_left_item_size;  	struct btrfs_map_token token; @@ -2675,9 +2674,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,  		clean_tree_block(trans, root, right);  	btrfs_item_key(right, &disk_key, 0); -	wret = fixup_low_keys(trans, root, path, &disk_key, 1); -	if (wret) -		ret = wret; +	fixup_low_keys(trans, root, path, &disk_key, 1);  	/* then fixup the leaf pointer in the path */  	if (path->slots[0] < push_items) { @@ -2748,7 +2745,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root  			      path->nodes[1], slot - 1, &left);  	if (ret) {  		/* we hit -ENOSPC, but it isn't fatal here */ -		ret = 1; +		if (ret == -ENOSPC) +			ret = 1;  		goto out;  	} @@ -2770,21 +2768,17 @@ out:  /*   * split the path's leaf in two, making sure there is at least data_size   * available for the resulting leaf level of the path. - * - * returns 0 if all went well and < 0 on failure.   */ -static noinline int copy_for_split(struct btrfs_trans_handle *trans, -			       struct btrfs_root *root, -			       struct btrfs_path *path, -			       struct extent_buffer *l, -			       struct extent_buffer *right, -			       int slot, int mid, int nritems) +static noinline void copy_for_split(struct btrfs_trans_handle *trans, +				    struct btrfs_root *root, +				    struct btrfs_path *path, +				    struct extent_buffer *l, +				    struct extent_buffer *right, +				    int slot, int mid, int nritems)  {  	int data_copy_size;  	int rt_data_off;  	int i; -	int ret = 0; -	int wret;  	struct btrfs_disk_key disk_key;  	struct btrfs_map_token token; @@ -2816,12 +2810,9 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,  	}  	btrfs_set_header_nritems(l, mid); -	ret = 0;  	btrfs_item_key(right, &disk_key, 0); -	wret = insert_ptr(trans, root, path, &disk_key, right->start, -			  path->slots[1] + 1, 1); -	if (wret) -		ret = wret; +	insert_ptr(trans, root, path, &disk_key, right->start, +		   path->slots[1] + 1, 1);  	
btrfs_mark_buffer_dirty(right);  	btrfs_mark_buffer_dirty(l); @@ -2839,8 +2830,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,  	}  	BUG_ON(path->slots[0] < 0); - -	return ret;  }  /* @@ -3029,12 +3018,8 @@ again:  	if (split == 0) {  		if (mid <= slot) {  			btrfs_set_header_nritems(right, 0); -			wret = insert_ptr(trans, root, path, -					  &disk_key, right->start, -					  path->slots[1] + 1, 1); -			if (wret) -				ret = wret; - +			insert_ptr(trans, root, path, &disk_key, right->start, +				   path->slots[1] + 1, 1);  			btrfs_tree_unlock(path->nodes[0]);  			free_extent_buffer(path->nodes[0]);  			path->nodes[0] = right; @@ -3042,29 +3027,21 @@ again:  			path->slots[1] += 1;  		} else {  			btrfs_set_header_nritems(right, 0); -			wret = insert_ptr(trans, root, path, -					  &disk_key, -					  right->start, +			insert_ptr(trans, root, path, &disk_key, right->start,  					  path->slots[1], 1); -			if (wret) -				ret = wret;  			btrfs_tree_unlock(path->nodes[0]);  			free_extent_buffer(path->nodes[0]);  			path->nodes[0] = right;  			path->slots[0] = 0; -			if (path->slots[1] == 0) { -				wret = fixup_low_keys(trans, root, -						path, &disk_key, 1); -				if (wret) -					ret = wret; -			} +			if (path->slots[1] == 0) +				fixup_low_keys(trans, root, path, +					       &disk_key, 1);  		}  		btrfs_mark_buffer_dirty(right);  		return ret;  	} -	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems); -	BUG_ON(ret); +	copy_for_split(trans, root, path, l, right, slot, mid, nritems);  	if (split == 2) {  		BUG_ON(num_doubles != 0); @@ -3072,7 +3049,7 @@ again:  		goto again;  	} -	return ret; +	return 0;  push_for_double:  	push_for_double_split(trans, root, path, data_size); @@ -3274,11 +3251,9 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,  		return ret;  	path->slots[0]++; -	ret = setup_items_for_insert(trans, root, path, new_key, &item_size, -				     item_size, item_size + -				     sizeof(struct btrfs_item), 
1); -	BUG_ON(ret); - +	setup_items_for_insert(trans, root, path, new_key, &item_size, +			       item_size, item_size + +			       sizeof(struct btrfs_item), 1);  	leaf = path->nodes[0];  	memcpy_extent_buffer(leaf,  			     btrfs_item_ptr_offset(leaf, path->slots[0]), @@ -3293,10 +3268,10 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,   * off the end of the item or if we shift the item to chop bytes off   * the front.   */ -int btrfs_truncate_item(struct btrfs_trans_handle *trans, -			struct btrfs_root *root, -			struct btrfs_path *path, -			u32 new_size, int from_end) +void btrfs_truncate_item(struct btrfs_trans_handle *trans, +			 struct btrfs_root *root, +			 struct btrfs_path *path, +			 u32 new_size, int from_end)  {  	int slot;  	struct extent_buffer *leaf; @@ -3316,7 +3291,7 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,  	old_size = btrfs_item_size_nr(leaf, slot);  	if (old_size == new_size) -		return 0; +		return;  	nritems = btrfs_header_nritems(leaf);  	data_end = leaf_data_end(root, leaf); @@ -3390,15 +3365,14 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,  		btrfs_print_leaf(root, leaf);  		BUG();  	} -	return 0;  }  /*   * make the item pointed to by the path bigger, data_size is the new size.   
*/ -int btrfs_extend_item(struct btrfs_trans_handle *trans, -		      struct btrfs_root *root, struct btrfs_path *path, -		      u32 data_size) +void btrfs_extend_item(struct btrfs_trans_handle *trans, +		       struct btrfs_root *root, struct btrfs_path *path, +		       u32 data_size)  {  	int slot;  	struct extent_buffer *leaf; @@ -3460,7 +3434,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,  		btrfs_print_leaf(root, leaf);  		BUG();  	} -	return 0;  }  /* @@ -3593,7 +3566,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,  	ret = 0;  	if (slot == 0) {  		btrfs_cpu_key_to_disk(&disk_key, cpu_key); -		ret = fixup_low_keys(trans, root, path, &disk_key, 1); +		fixup_low_keys(trans, root, path, &disk_key, 1);  	}  	if (btrfs_leaf_free_space(root, leaf) < 0) { @@ -3611,17 +3584,16 @@ out:   * to save stack depth by doing the bulk of the work in a function   * that doesn't call btrfs_search_slot   */ -int setup_items_for_insert(struct btrfs_trans_handle *trans, -			   struct btrfs_root *root, struct btrfs_path *path, -			   struct btrfs_key *cpu_key, u32 *data_size, -			   u32 total_data, u32 total_size, int nr) +void setup_items_for_insert(struct btrfs_trans_handle *trans, +			    struct btrfs_root *root, struct btrfs_path *path, +			    struct btrfs_key *cpu_key, u32 *data_size, +			    u32 total_data, u32 total_size, int nr)  {  	struct btrfs_item *item;  	int i;  	u32 nritems;  	unsigned int data_end;  	struct btrfs_disk_key disk_key; -	int ret;  	struct extent_buffer *leaf;  	int slot;  	struct btrfs_map_token token; @@ -3687,10 +3659,9 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,  	btrfs_set_header_nritems(leaf, nritems + nr); -	ret = 0;  	if (slot == 0) {  		btrfs_cpu_key_to_disk(&disk_key, cpu_key); -		ret = fixup_low_keys(trans, root, path, &disk_key, 1); +		fixup_low_keys(trans, root, path, &disk_key, 1);  	}  	btrfs_unlock_up_safe(path, 1);  	btrfs_mark_buffer_dirty(leaf); @@ -3699,7 +3670,6 @@ int 
setup_items_for_insert(struct btrfs_trans_handle *trans,  		btrfs_print_leaf(root, leaf);  		BUG();  	} -	return ret;  }  /* @@ -3726,16 +3696,14 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,  	if (ret == 0)  		return -EEXIST;  	if (ret < 0) -		goto out; +		return ret;  	slot = path->slots[0];  	BUG_ON(slot < 0); -	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size, +	setup_items_for_insert(trans, root, path, cpu_key, data_size,  			       total_data, total_size, nr); - -out: -	return ret; +	return 0;  }  /* @@ -3771,13 +3739,11 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root   * the tree should have been previously balanced so the deletion does not   * empty a node.   */ -static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, -		   struct btrfs_path *path, int level, int slot) +static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, +		    struct btrfs_path *path, int level, int slot)  {  	struct extent_buffer *parent = path->nodes[level];  	u32 nritems; -	int ret = 0; -	int wret;  	nritems = btrfs_header_nritems(parent);  	if (slot != nritems - 1) { @@ -3797,12 +3763,9 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,  		struct btrfs_disk_key disk_key;  		btrfs_node_key(parent, &disk_key, 0); -		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1); -		if (wret) -			ret = wret; +		fixup_low_keys(trans, root, path, &disk_key, level + 1);  	}  	btrfs_mark_buffer_dirty(parent); -	return ret;  }  /* @@ -3815,17 +3778,13 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,   * The path must have already been setup for deleting the leaf, including   * all the proper balancing.  path->nodes[1] must be locked.   
*/ -static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, -				   struct btrfs_root *root, -				   struct btrfs_path *path, -				   struct extent_buffer *leaf) +static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, +				    struct btrfs_root *root, +				    struct btrfs_path *path, +				    struct extent_buffer *leaf)  { -	int ret; -  	WARN_ON(btrfs_header_generation(leaf) != trans->transid); -	ret = del_ptr(trans, root, path, 1, path->slots[1]); -	if (ret) -		return ret; +	del_ptr(trans, root, path, 1, path->slots[1]);  	/*  	 * btrfs_free_extent is expensive, we want to make sure we @@ -3838,7 +3797,6 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,  	extent_buffer_get(leaf);  	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);  	free_extent_buffer_stale(leaf); -	return 0;  }  /*   * delete the item at the leaf level in path.  If that empties @@ -3899,8 +3857,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,  		} else {  			btrfs_set_path_blocking(path);  			clean_tree_block(trans, root, leaf); -			ret = btrfs_del_leaf(trans, root, path, leaf); -			BUG_ON(ret); +			btrfs_del_leaf(trans, root, path, leaf);  		}  	} else {  		int used = leaf_space_used(leaf, 0, nritems); @@ -3908,10 +3865,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,  			struct btrfs_disk_key disk_key;  			btrfs_item_key(leaf, &disk_key, 0); -			wret = fixup_low_keys(trans, root, path, -					      &disk_key, 1); -			if (wret) -				ret = wret; +			fixup_low_keys(trans, root, path, &disk_key, 1);  		}  		/* delete the leaf if it is mostly empty */ @@ -3939,9 +3893,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,  			if (btrfs_header_nritems(leaf) == 0) {  				path->slots[1] = slot; -				ret = btrfs_del_leaf(trans, root, path, leaf); -				BUG_ON(ret); +				btrfs_del_leaf(trans, root, path, leaf);  				free_extent_buffer(leaf); +				ret = 0;  		
	} else {  				/* if we're still in the path, make sure  				 * we're dirty.  Otherwise, one of the @@ -4124,7 +4078,7 @@ find_next_key:  		}  		btrfs_set_path_blocking(path);  		cur = read_node_slot(root, cur, slot); -		BUG_ON(!cur); +		BUG_ON(!cur); /* -ENOMEM */  		btrfs_tree_read_lock(cur); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f7da8a8d13c..ed2d196f7a8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1525,6 +1525,7 @@ struct btrfs_ioctl_defrag_range_args {  #define BTRFS_MOUNT_SKIP_BALANCE	(1 << 19)  #define BTRFS_MOUNT_CHECK_INTEGRITY	(1 << 20)  #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) +#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)  #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)  #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt) @@ -2518,8 +2519,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,  int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);  int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,  				       u64 start, u64 len); -int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, -				struct btrfs_root *root); +void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, +				 struct btrfs_root *root);  int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,  			       struct btrfs_root *root);  int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, @@ -2582,8 +2583,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,  			     u64 num_bytes);  int btrfs_set_block_group_ro(struct btrfs_root *root,  			     struct btrfs_block_group_cache *cache); -int btrfs_set_block_group_rw(struct btrfs_root *root, -			     struct btrfs_block_group_cache *cache); +void btrfs_set_block_group_rw(struct btrfs_root *root, +			      struct btrfs_block_group_cache *cache);  void btrfs_put_block_group_cache(struct btrfs_fs_info *info);  u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);  int 
btrfs_error_unpin_extent_range(struct btrfs_root *root, @@ -2602,9 +2603,9 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);  int btrfs_previous_item(struct btrfs_root *root,  			struct btrfs_path *path, u64 min_objectid,  			int type); -int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, -			    struct btrfs_root *root, struct btrfs_path *path, -			    struct btrfs_key *new_key); +void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, +			     struct btrfs_root *root, struct btrfs_path *path, +			     struct btrfs_key *new_key);  struct extent_buffer *btrfs_root_node(struct btrfs_root *root);  struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);  int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, @@ -2624,12 +2625,13 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,  		      struct extent_buffer **cow_ret, u64 new_root_objectid);  int btrfs_block_can_be_shared(struct btrfs_root *root,  			      struct extent_buffer *buf); -int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root -		      *root, struct btrfs_path *path, u32 data_size); -int btrfs_truncate_item(struct btrfs_trans_handle *trans, -			struct btrfs_root *root, -			struct btrfs_path *path, -			u32 new_size, int from_end); +void btrfs_extend_item(struct btrfs_trans_handle *trans, +		       struct btrfs_root *root, struct btrfs_path *path, +		       u32 data_size); +void btrfs_truncate_item(struct btrfs_trans_handle *trans, +			 struct btrfs_root *root, +			 struct btrfs_path *path, +			 u32 new_size, int from_end);  int btrfs_split_item(struct btrfs_trans_handle *trans,  		     struct btrfs_root *root,  		     struct btrfs_path *path, @@ -2663,10 +2665,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,  	return btrfs_del_items(trans, root, path, path->slots[0], 1);  } -int setup_items_for_insert(struct btrfs_trans_handle *trans, -			   struct btrfs_root *root, struct btrfs_path 
*path, -			   struct btrfs_key *cpu_key, u32 *data_size, -			   u32 total_data, u32 total_size, int nr); +void setup_items_for_insert(struct btrfs_trans_handle *trans, +			    struct btrfs_root *root, struct btrfs_path *path, +			    struct btrfs_key *cpu_key, u32 *data_size, +			    u32 total_data, u32 total_size, int nr);  int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root  		      *root, struct btrfs_key *key, void *data, u32 data_size);  int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, @@ -2693,9 +2695,9 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)  }  int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);  int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); -void btrfs_drop_snapshot(struct btrfs_root *root, -			 struct btrfs_block_rsv *block_rsv, int update_ref, -			 int for_reloc); +int __must_check btrfs_drop_snapshot(struct btrfs_root *root, +				     struct btrfs_block_rsv *block_rsv, +				     int update_ref, int for_reloc);  int btrfs_drop_subtree(struct btrfs_trans_handle *trans,  			struct btrfs_root *root,  			struct extent_buffer *node, @@ -2757,9 +2759,10 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,  int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root  		      *root, struct btrfs_key *key, struct btrfs_root_item  		      *item); -int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root -		      *root, struct btrfs_key *key, struct btrfs_root_item -		      *item); +int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, +				   struct btrfs_root *root, +				   struct btrfs_key *key, +				   struct btrfs_root_item *item);  int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct  			 btrfs_root_item *item, struct btrfs_key *key);  int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); @@ -2943,7 +2946,7 @@ int 
btrfs_orphan_cleanup(struct btrfs_root *root);  void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,  			      struct btrfs_root *root);  int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); -int btrfs_invalidate_inodes(struct btrfs_root *root); +void btrfs_invalidate_inodes(struct btrfs_root *root);  void btrfs_add_delayed_iput(struct inode *inode);  void btrfs_run_delayed_iputs(struct btrfs_root *root);  int btrfs_prealloc_file_range(struct inode *inode, int mode, @@ -2995,13 +2998,41 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);  /* super.c */  int btrfs_parse_options(struct btrfs_root *root, char *options);  int btrfs_sync_fs(struct super_block *sb, int wait); +void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);  void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, -		     unsigned int line, int errno); +		     unsigned int line, int errno, const char *fmt, ...); + +void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, +			       struct btrfs_root *root, const char *function, +			       unsigned int line, int errno); + +#define btrfs_abort_transaction(trans, root, errno)		\ +do {								\ +	__btrfs_abort_transaction(trans, root, __func__,	\ +				  __LINE__, errno);		\ +} while (0)  #define btrfs_std_error(fs_info, errno)				\  do {								\  	if ((errno))						\ -		__btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ +		__btrfs_std_error((fs_info), __func__,		\ +				   __LINE__, (errno), NULL);	\ +} while (0) + +#define btrfs_error(fs_info, errno, fmt, args...)		\ +do {								\ +	__btrfs_std_error((fs_info), __func__, __LINE__,	\ +			  (errno), fmt, ##args);		\ +} while (0) + +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, +		   unsigned int line, int errno, const char *fmt, ...); + +#define btrfs_panic(fs_info, errno, fmt, args...)			
\ +do {									\ +	struct btrfs_fs_info *_i = (fs_info);				\ +	__btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args);	\ +	BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR));	\  } while (0)  /* acl.c */ @@ -3037,16 +3068,17 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,  void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,  			      struct btrfs_pending_snapshot *pending,  			      u64 *bytes_to_reserve); -void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, +int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,  			      struct btrfs_pending_snapshot *pending);  /* scrub.c */  int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,  		    struct btrfs_scrub_progress *progress, int readonly); -int btrfs_scrub_pause(struct btrfs_root *root); -int btrfs_scrub_pause_super(struct btrfs_root *root); -int btrfs_scrub_continue(struct btrfs_root *root); -int btrfs_scrub_continue_super(struct btrfs_root *root); +void btrfs_scrub_pause(struct btrfs_root *root); +void btrfs_scrub_pause_super(struct btrfs_root *root); +void btrfs_scrub_continue(struct btrfs_root *root); +void btrfs_scrub_continue_super(struct btrfs_root *root); +int __btrfs_scrub_cancel(struct btrfs_fs_info *info);  int btrfs_scrub_cancel(struct btrfs_root *root);  int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);  int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index fe4cd0f1cef..03e3748d84d 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)  	return NULL;  } +/* Will return either the node or PTR_ERR(-ENOMEM) */  static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(  							struct inode *inode)  { @@ -836,10 +837,8 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,  	
btrfs_clear_path_blocking(path, NULL, 0);  	/* insert the keys of the items */ -	ret = setup_items_for_insert(trans, root, path, keys, data_size, -				     total_data_size, total_size, nitems); -	if (ret) -		goto error; +	setup_items_for_insert(trans, root, path, keys, data_size, +			       total_data_size, total_size, nitems);  	/* insert the dir index items */  	slot = path->slots[0]; @@ -1108,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,  	return 0;  } -/* Called when committing the transaction. */ +/* + * Called when committing the transaction. + * Returns 0 on success. + * Returns < 0 on error and returns with an aborted transaction with any + * outstanding delayed items cleaned up. + */  int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,  			    struct btrfs_root *root)  { +	struct btrfs_root *curr_root = root;  	struct btrfs_delayed_root *delayed_root;  	struct btrfs_delayed_node *curr_node, *prev_node;  	struct btrfs_path *path;  	struct btrfs_block_rsv *block_rsv;  	int ret = 0; +	if (trans->aborted) +		return -EIO; +  	path = btrfs_alloc_path();  	if (!path)  		return -ENOMEM; @@ -1130,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,  	curr_node = btrfs_first_delayed_node(delayed_root);  	while (curr_node) { -		root = curr_node->root; -		ret = btrfs_insert_delayed_items(trans, path, root, +		curr_root = curr_node->root; +		ret = btrfs_insert_delayed_items(trans, path, curr_root,  						 curr_node);  		if (!ret) -			ret = btrfs_delete_delayed_items(trans, path, root, -							 curr_node); +			ret = btrfs_delete_delayed_items(trans, path, +						curr_root, curr_node);  		if (!ret) -			ret = btrfs_update_delayed_inode(trans, root, path, -							 curr_node); +			ret = btrfs_update_delayed_inode(trans, curr_root, +						path, curr_node);  		if (ret) {  			btrfs_release_delayed_node(curr_node); +			btrfs_abort_transaction(trans, root, ret);  			break;  		} @@ -1151,6 +1160,7 @@ int 
btrfs_run_delayed_items(struct btrfs_trans_handle *trans,  	btrfs_free_path(path);  	trans->block_rsv = block_rsv; +  	return ret;  } @@ -1371,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)  	btrfs_wq_run_delayed_node(delayed_root, root, 0);  } +/* Will return 0 or -ENOMEM */  int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,  				   struct btrfs_root *root, const char *name,  				   int name_len, struct inode *dir, diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 66e4f29505a..69f22e3ab3b 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -420,7 +420,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,   * this does all the dirty work in terms of maintaining the correct   * overall modification count.   */ -static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info, +static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,  					struct btrfs_trans_handle *trans,  					struct btrfs_delayed_ref_node *ref,  					u64 bytenr, u64 num_bytes, @@ -487,20 +487,19 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,  		 * we've updated the existing ref, free the newly  		 * allocated ref  		 */ -		kfree(ref); +		kfree(head_ref);  	} else {  		delayed_refs->num_heads++;  		delayed_refs->num_heads_ready++;  		delayed_refs->num_entries++;  		trans->delayed_ref_updates++;  	} -	return 0;  }  /*   * helper to insert a delayed tree ref into the rbtree.   
*/ -static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info, +static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,  					 struct btrfs_trans_handle *trans,  					 struct btrfs_delayed_ref_node *ref,  					 u64 bytenr, u64 num_bytes, u64 parent, @@ -549,18 +548,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,  		 * we've updated the existing ref, free the newly  		 * allocated ref  		 */ -		kfree(ref); +		kfree(full_ref);  	} else {  		delayed_refs->num_entries++;  		trans->delayed_ref_updates++;  	} -	return 0;  }  /*   * helper to insert a delayed data ref into the rbtree.   */ -static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info, +static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,  					 struct btrfs_trans_handle *trans,  					 struct btrfs_delayed_ref_node *ref,  					 u64 bytenr, u64 num_bytes, u64 parent, @@ -611,12 +609,11 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,  		 * we've updated the existing ref, free the newly  		 * allocated ref  		 */ -		kfree(ref); +		kfree(full_ref);  	} else {  		delayed_refs->num_entries++;  		trans->delayed_ref_updates++;  	} -	return 0;  }  /* @@ -634,7 +631,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,  	struct btrfs_delayed_tree_ref *ref;  	struct btrfs_delayed_ref_head *head_ref;  	struct btrfs_delayed_ref_root *delayed_refs; -	int ret;  	BUG_ON(extent_op && extent_op->is_data);  	ref = kmalloc(sizeof(*ref), GFP_NOFS); @@ -656,14 +652,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,  	 * insert both the head node and the new ref without dropping  	 * the spin lock  	 */ -	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, +	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,  				   num_bytes, action, 0); -	BUG_ON(ret); -	ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, +	add_delayed_tree_ref(fs_info, trans, 
&ref->node, bytenr,  				   num_bytes, parent, ref_root, level, action,  				   for_cow); -	BUG_ON(ret);  	if (!need_ref_seq(for_cow, ref_root) &&  	    waitqueue_active(&delayed_refs->seq_wait))  		wake_up(&delayed_refs->seq_wait); @@ -685,7 +679,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,  	struct btrfs_delayed_data_ref *ref;  	struct btrfs_delayed_ref_head *head_ref;  	struct btrfs_delayed_ref_root *delayed_refs; -	int ret;  	BUG_ON(extent_op && !extent_op->is_data);  	ref = kmalloc(sizeof(*ref), GFP_NOFS); @@ -707,14 +700,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,  	 * insert both the head node and the new ref without dropping  	 * the spin lock  	 */ -	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, +	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,  				   num_bytes, action, 1); -	BUG_ON(ret); -	ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, +	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,  				   num_bytes, parent, ref_root, owner, offset,  				   action, for_cow); -	BUG_ON(ret);  	if (!need_ref_seq(for_cow, ref_root) &&  	    waitqueue_active(&delayed_refs->seq_wait))  		wake_up(&delayed_refs->seq_wait); @@ -729,7 +720,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,  {  	struct btrfs_delayed_ref_head *head_ref;  	struct btrfs_delayed_ref_root *delayed_refs; -	int ret;  	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);  	if (!head_ref) @@ -740,10 +730,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,  	delayed_refs = &trans->transaction->delayed_refs;  	spin_lock(&delayed_refs->lock); -	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, +	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,  				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,  				   extent_op->is_data); -	BUG_ON(ret);  	if (waitqueue_active(&delayed_refs->seq_wait))  		wake_up(&delayed_refs->seq_wait); diff --git 
a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 31d84e78129..c1a074d0696 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -49,9 +49,8 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle  		di = btrfs_match_dir_item_name(root, path, name, name_len);  		if (di)  			return ERR_PTR(-EEXIST); -		ret = btrfs_extend_item(trans, root, path, data_size); -	} -	if (ret < 0) +		btrfs_extend_item(trans, root, path, data_size); +	} else if (ret < 0)  		return ERR_PTR(ret);  	WARN_ON(ret > 0);  	leaf = path->nodes[0]; @@ -116,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,   * 'location' is the key to stuff into the directory item, 'type' is the   * type of the inode we're pointing to, and 'index' is the sequence number   * to use for the second index (if one is created). + * Will return 0 or -ENOMEM   */  int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root  			  *root, const char *name, int name_len, @@ -383,8 +383,8 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,  		start = btrfs_item_ptr_offset(leaf, path->slots[0]);  		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,  			item_len - (ptr + sub_item_len - start)); -		ret = btrfs_truncate_item(trans, root, path, -					  item_len - sub_item_len, 1); +		btrfs_truncate_item(trans, root, path, +				    item_len - sub_item_len, 1);  	}  	return ret;  } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6107b695841..7b55eee15a5 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -48,20 +48,19 @@  static struct extent_io_ops btree_extent_io_ops;  static void end_workqueue_fn(struct btrfs_work *work);  static void free_fs_root(struct btrfs_root *root); -static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, +static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,  				    int read_only); -static int btrfs_destroy_ordered_operations(struct btrfs_root *root); -static int 
btrfs_destroy_ordered_extents(struct btrfs_root *root); +static void btrfs_destroy_ordered_operations(struct btrfs_root *root); +static void btrfs_destroy_ordered_extents(struct btrfs_root *root);  static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,  				      struct btrfs_root *root); -static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); -static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); +static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); +static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);  static int btrfs_destroy_marked_extents(struct btrfs_root *root,  					struct extent_io_tree *dirty_pages,  					int mark);  static int btrfs_destroy_pinned_extent(struct btrfs_root *root,  				       struct extent_io_tree *pinned_extents); -static int btrfs_cleanup_transaction(struct btrfs_root *root);  /*   * end_io_wq structs are used to do processing in task context when an IO is @@ -99,6 +98,7 @@ struct async_submit_bio {  	 */  	u64 bio_offset;  	struct btrfs_work work; +	int error;  };  /* @@ -332,7 +332,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,  		return 0;  	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, -			 0, &cached_state, GFP_NOFS); +			 0, &cached_state);  	if (extent_buffer_uptodate(eb) &&  	    btrfs_header_generation(eb) == parent_transid) {  		ret = 0; @@ -425,7 +425,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)  	eb = (struct extent_buffer *)page->private;  	if (page != eb->pages[0])  		return 0; -  	found_start = btrfs_header_bytenr(eb);  	if (found_start != start) {  		WARN_ON(1); @@ -727,11 +726,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)  static void run_one_async_start(struct btrfs_work *work)  {  	struct async_submit_bio *async; +	int ret;  	async = container_of(work, struct  async_submit_bio, work); -	async->submit_bio_start(async->inode, async->rw, 
async->bio, -			       async->mirror_num, async->bio_flags, -			       async->bio_offset); +	ret = async->submit_bio_start(async->inode, async->rw, async->bio, +				      async->mirror_num, async->bio_flags, +				      async->bio_offset); +	if (ret) +		async->error = ret;  }  static void run_one_async_done(struct btrfs_work *work) @@ -752,6 +754,12 @@ static void run_one_async_done(struct btrfs_work *work)  	    waitqueue_active(&fs_info->async_submit_wait))  		wake_up(&fs_info->async_submit_wait); +	/* If an error occurred we just want to clean up the bio and move on */ +	if (async->error) { +		bio_endio(async->bio, async->error); +		return; +	} +  	async->submit_bio_done(async->inode, async->rw, async->bio,  			       async->mirror_num, async->bio_flags,  			       async->bio_offset); @@ -793,6 +801,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,  	async->bio_flags = bio_flags;  	async->bio_offset = bio_offset; +	async->error = 0; +  	atomic_inc(&fs_info->nr_async_submits);  	if (rw & REQ_SYNC) @@ -814,15 +824,18 @@ static int btree_csum_one_bio(struct bio *bio)  	struct bio_vec *bvec = bio->bi_io_vec;  	int bio_index = 0;  	struct btrfs_root *root; +	int ret = 0;  	WARN_ON(bio->bi_vcnt <= 0);  	while (bio_index < bio->bi_vcnt) {  		root = BTRFS_I(bvec->bv_page->mapping->host)->root; -		csum_dirty_buffer(root, bvec->bv_page); +		ret = csum_dirty_buffer(root, bvec->bv_page); +		if (ret) +			break;  		bio_index++;  		bvec++;  	} -	return 0; +	return ret;  }  static int __btree_submit_bio_start(struct inode *inode, int rw, @@ -834,8 +847,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,  	 * when we're called for a write, we're already in the async  	 * submission context.  
Just jump into btrfs_map_bio  	 */ -	btree_csum_one_bio(bio); -	return 0; +	return btree_csum_one_bio(bio);  }  static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, @@ -863,7 +875,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,  		 */  		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,  					  bio, 1); -		BUG_ON(ret); +		if (ret) +			return ret;  		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,  				     mirror_num, 0);  	} @@ -1080,8 +1093,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,  } -int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, -		     struct extent_buffer *buf) +void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, +		      struct extent_buffer *buf)  {  	if (btrfs_header_generation(buf) ==  	    root->fs_info->running_transaction->transid) { @@ -1091,8 +1104,14 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,  			spin_lock(&root->fs_info->delalloc_lock);  			if (root->fs_info->dirty_metadata_bytes >= buf->len)  				root->fs_info->dirty_metadata_bytes -= buf->len; -			else -				WARN_ON(1); +			else { +				spin_unlock(&root->fs_info->delalloc_lock); +				btrfs_panic(root->fs_info, -EOVERFLOW, +					  "Can't clear %lu bytes from " +					  "dirty_metadata_bytes (%lu)", +					  buf->len, +					  root->fs_info->dirty_metadata_bytes); +			}  			spin_unlock(&root->fs_info->delalloc_lock);  		} @@ -1100,13 +1119,12 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,  		btrfs_set_lock_blocking(buf);  		clear_extent_buffer_dirty(buf);  	} -	return 0;  } -static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, -			u32 stripesize, struct btrfs_root *root, -			struct btrfs_fs_info *fs_info, -			u64 objectid) +static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, +			 u32 stripesize, struct btrfs_root *root, +	
		 struct btrfs_fs_info *fs_info, +			 u64 objectid)  {  	root->node = NULL;  	root->commit_root = NULL; @@ -1158,13 +1176,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,  	root->defrag_running = 0;  	root->root_key.objectid = objectid;  	root->anon_dev = 0; -	return 0;  } -static int find_and_setup_root(struct btrfs_root *tree_root, -			       struct btrfs_fs_info *fs_info, -			       u64 objectid, -			       struct btrfs_root *root) +static int __must_check find_and_setup_root(struct btrfs_root *tree_root, +					    struct btrfs_fs_info *fs_info, +					    u64 objectid, +					    struct btrfs_root *root)  {  	int ret;  	u32 blocksize; @@ -1177,7 +1194,8 @@ static int find_and_setup_root(struct btrfs_root *tree_root,  				   &root->root_item, &root->root_key);  	if (ret > 0)  		return -ENOENT; -	BUG_ON(ret); +	else if (ret < 0) +		return ret;  	generation = btrfs_root_generation(&root->root_item);  	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); @@ -1346,7 +1364,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,  	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),  				     blocksize, generation);  	root->commit_root = btrfs_root_node(root); -	BUG_ON(!root->node); +	BUG_ON(!root->node); /* -ENOMEM */  out:  	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {  		root->ref_cows = 1; @@ -1537,9 +1555,10 @@ static int transaction_kthread(void *arg)  	u64 transid;  	unsigned long now;  	unsigned long delay; -	int ret; +	bool cannot_commit;  	do { +		cannot_commit = false;  		delay = HZ * 30;  		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);  		mutex_lock(&root->fs_info->transaction_kthread_mutex); @@ -1561,11 +1580,14 @@ static int transaction_kthread(void *arg)  		transid = cur->transid;  		spin_unlock(&root->fs_info->trans_lock); +		/* If the file system is aborted, this will always fail. 
*/  		trans = btrfs_join_transaction(root); -		BUG_ON(IS_ERR(trans)); +		if (IS_ERR(trans)) { +			cannot_commit = true; +			goto sleep; +		}  		if (transid == trans->transid) { -			ret = btrfs_commit_transaction(trans, root); -			BUG_ON(ret); +			btrfs_commit_transaction(trans, root);  		} else {  			btrfs_end_transaction(trans, root);  		} @@ -1576,7 +1598,8 @@ sleep:  		if (!try_to_freeze()) {  			set_current_state(TASK_INTERRUPTIBLE);  			if (!kthread_should_stop() && -			    !btrfs_transaction_blocked(root->fs_info)) +			    (!btrfs_transaction_blocked(root->fs_info) || +			     cannot_commit))  				schedule_timeout(delay);  			__set_current_state(TASK_RUNNING);  		} @@ -2028,7 +2051,12 @@ int open_ctree(struct super_block *sb,  	/* check FS state, whether FS is broken. */  	fs_info->fs_state |= btrfs_super_flags(disk_super); -	btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); +	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); +	if (ret) { +		printk(KERN_ERR "btrfs: superblock contains fatal errors\n"); +		err = ret; +		goto fail_alloc; +	}  	/*  	 * run through our array of backup supers and setup @@ -2218,6 +2246,14 @@ int open_ctree(struct super_block *sb,  		goto fail_sb_buffer;  	} +	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && +			(leafsize != nodesize || sectorsize != nodesize)) { +		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes " +				"are not allowed for mixed block groups on %s\n", +				sb->s_id); +		goto fail_sb_buffer; +	} +  	mutex_lock(&fs_info->chunk_mutex);  	ret = btrfs_read_sys_array(tree_root);  	mutex_unlock(&fs_info->chunk_mutex); @@ -2237,7 +2273,7 @@ int open_ctree(struct super_block *sb,  	chunk_root->node = read_tree_block(chunk_root,  					   btrfs_super_chunk_root(disk_super),  					   blocksize, generation); -	BUG_ON(!chunk_root->node); +	BUG_ON(!chunk_root->node); /* -ENOMEM */  	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {  		printk(KERN_WARNING "btrfs: failed 
to read chunk root on %s\n",  		       sb->s_id); @@ -2377,21 +2413,31 @@ retry_root_backup:  		log_tree_root->node = read_tree_block(tree_root, bytenr,  						      blocksize,  						      generation + 1); +		/* returns with log_tree_root freed on success */  		ret = btrfs_recover_log_trees(log_tree_root); -		BUG_ON(ret); +		if (ret) { +			btrfs_error(tree_root->fs_info, ret, +				    "Failed to recover log tree"); +			free_extent_buffer(log_tree_root->node); +			kfree(log_tree_root); +			goto fail_trans_kthread; +		}  		if (sb->s_flags & MS_RDONLY) { -			ret =  btrfs_commit_super(tree_root); -			BUG_ON(ret); +			ret = btrfs_commit_super(tree_root); +			if (ret) +				goto fail_trans_kthread;  		}  	}  	ret = btrfs_find_orphan_roots(tree_root); -	BUG_ON(ret); +	if (ret) +		goto fail_trans_kthread;  	if (!(sb->s_flags & MS_RDONLY)) {  		ret = btrfs_cleanup_fs_roots(fs_info); -		BUG_ON(ret); +		if (ret) { +			}  		ret = btrfs_recover_relocation(tree_root);  		if (ret < 0) { @@ -2811,6 +2857,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)  	if (total_errors > max_errors) {  		printk(KERN_ERR "btrfs: %d errors while writing supers\n",  		       total_errors); + +		/* This shouldn't happen. 
FUA is masked off if unsupported */  		BUG();  	} @@ -2827,9 +2875,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)  	}  	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);  	if (total_errors > max_errors) { -		printk(KERN_ERR "btrfs: %d errors while writing supers\n", -		       total_errors); -		BUG(); +		btrfs_error(root->fs_info, -EIO, +			    "%d errors while writing supers", total_errors); +		return -EIO;  	}  	return 0;  } @@ -2843,7 +2891,20 @@ int write_ctree_super(struct btrfs_trans_handle *trans,  	return ret;  } -int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) +/* Kill all outstanding I/O */ +void btrfs_abort_devices(struct btrfs_root *root) +{ +	struct list_head *head; +	struct btrfs_device *dev; +	mutex_lock(&root->fs_info->fs_devices->device_list_mutex); +	head = &root->fs_info->fs_devices->devices; +	list_for_each_entry_rcu(dev, head, dev_list) { +		blk_abort_queue(dev->bdev->bd_disk->queue); +	} +	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); +} + +void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)  {  	spin_lock(&fs_info->fs_roots_radix_lock);  	radix_tree_delete(&fs_info->fs_roots_radix, @@ -2856,7 +2917,6 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)  	__btrfs_remove_free_space_cache(root->free_ino_pinned);  	__btrfs_remove_free_space_cache(root->free_ino_ctl);  	free_fs_root(root); -	return 0;  }  static void free_fs_root(struct btrfs_root *root) @@ -2873,7 +2933,7 @@ static void free_fs_root(struct btrfs_root *root)  	kfree(root);  } -static int del_fs_roots(struct btrfs_fs_info *fs_info) +static void del_fs_roots(struct btrfs_fs_info *fs_info)  {  	int ret;  	struct btrfs_root *gang[8]; @@ -2902,7 +2962,6 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)  		for (i = 0; i < ret; i++)  			btrfs_free_fs_root(fs_info, gang[i]);  	} -	return 0;  }  int btrfs_cleanup_fs_roots(struct btrfs_fs_info 
*fs_info) @@ -2951,14 +3010,21 @@ int btrfs_commit_super(struct btrfs_root *root)  	if (IS_ERR(trans))  		return PTR_ERR(trans);  	ret = btrfs_commit_transaction(trans, root); -	BUG_ON(ret); +	if (ret) +		return ret;  	/* run commit again to drop the original snapshot */  	trans = btrfs_join_transaction(root);  	if (IS_ERR(trans))  		return PTR_ERR(trans); -	btrfs_commit_transaction(trans, root); +	ret = btrfs_commit_transaction(trans, root); +	if (ret) +		return ret;  	ret = btrfs_write_and_wait_transaction(NULL, root); -	BUG_ON(ret); +	if (ret) { +		btrfs_error(root->fs_info, ret, +			    "Failed to sync btree inode to disk."); +		return ret; +	}  	ret = write_ctree_super(NULL, root, 0);  	return ret; @@ -3209,15 +3275,23 @@ out:  	return 0;  } -static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, +static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,  			      int read_only)  { +	if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) { +		printk(KERN_ERR "btrfs: unsupported checksum algorithm\n"); +		return -EINVAL; +	} +  	if (read_only) -		return; +		return 0; -	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) +	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {  		printk(KERN_WARNING "warning: mount fs with errors, "  		       "running btrfsck is recommended\n"); +	} + +	return 0;  }  int btrfs_error_commit_super(struct btrfs_root *root) @@ -3239,7 +3313,7 @@ int btrfs_error_commit_super(struct btrfs_root *root)  	return ret;  } -static int btrfs_destroy_ordered_operations(struct btrfs_root *root) +static void btrfs_destroy_ordered_operations(struct btrfs_root *root)  {  	struct btrfs_inode *btrfs_inode;  	struct list_head splice; @@ -3261,11 +3335,9 @@ static int btrfs_destroy_ordered_operations(struct btrfs_root *root)  	spin_unlock(&root->fs_info->ordered_extent_lock);  	mutex_unlock(&root->fs_info->ordered_operations_mutex); - -	return 0;  } -static int btrfs_destroy_ordered_extents(struct btrfs_root *root) 
+static void btrfs_destroy_ordered_extents(struct btrfs_root *root)  {  	struct list_head splice;  	struct btrfs_ordered_extent *ordered; @@ -3297,12 +3369,10 @@ static int btrfs_destroy_ordered_extents(struct btrfs_root *root)  	}  	spin_unlock(&root->fs_info->ordered_extent_lock); - -	return 0;  } -static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, -				      struct btrfs_root *root) +int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, +			       struct btrfs_root *root)  {  	struct rb_node *node;  	struct btrfs_delayed_ref_root *delayed_refs; @@ -3311,6 +3381,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,  	delayed_refs = &trans->delayed_refs; +again:  	spin_lock(&delayed_refs->lock);  	if (delayed_refs->num_entries == 0) {  		spin_unlock(&delayed_refs->lock); @@ -3332,6 +3403,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,  			struct btrfs_delayed_ref_head *head;  			head = btrfs_delayed_node_to_head(ref); +			spin_unlock(&delayed_refs->lock);  			mutex_lock(&head->mutex);  			kfree(head->extent_op);  			delayed_refs->num_heads--; @@ -3339,8 +3411,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,  				delayed_refs->num_heads_ready--;  			list_del_init(&head->cluster);  			mutex_unlock(&head->mutex); +			btrfs_put_delayed_ref(ref); +			goto again;  		} -  		spin_unlock(&delayed_refs->lock);  		btrfs_put_delayed_ref(ref); @@ -3353,7 +3426,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,  	return ret;  } -static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) +static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)  {  	struct btrfs_pending_snapshot *snapshot;  	struct list_head splice; @@ -3371,11 +3444,9 @@ static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)  		kfree(snapshot);  	} - -	return 0;  } -static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) 
+static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)  {  	struct btrfs_inode *btrfs_inode;  	struct list_head splice; @@ -3395,8 +3466,6 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)  	}  	spin_unlock(&root->fs_info->delalloc_lock); - -	return 0;  }  static int btrfs_destroy_marked_extents(struct btrfs_root *root, @@ -3487,13 +3556,43 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,  	return 0;  } -static int btrfs_cleanup_transaction(struct btrfs_root *root) +void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, +				   struct btrfs_root *root) +{ +	btrfs_destroy_delayed_refs(cur_trans, root); +	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, +				cur_trans->dirty_pages.dirty_bytes); + +	/* FIXME: cleanup wait for commit */ +	cur_trans->in_commit = 1; +	cur_trans->blocked = 1; +	if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) +		wake_up(&root->fs_info->transaction_blocked_wait); + +	cur_trans->blocked = 0; +	if (waitqueue_active(&root->fs_info->transaction_wait)) +		wake_up(&root->fs_info->transaction_wait); + +	cur_trans->commit_done = 1; +	if (waitqueue_active(&cur_trans->commit_wait)) +		wake_up(&cur_trans->commit_wait); + +	btrfs_destroy_pending_snapshots(cur_trans); + +	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, +				     EXTENT_DIRTY); + +	/* +	memset(cur_trans, 0, sizeof(*cur_trans)); +	kmem_cache_free(btrfs_transaction_cachep, cur_trans); +	*/ +} + +int btrfs_cleanup_transaction(struct btrfs_root *root)  {  	struct btrfs_transaction *t;  	LIST_HEAD(list); -	WARN_ON(1); -  	mutex_lock(&root->fs_info->transaction_kthread_mutex);  	spin_lock(&root->fs_info->trans_lock); @@ -3558,6 +3657,17 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)  	return 0;  } +static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page, +					  u64 start, u64 end, +					  struct extent_state *state) +{ +	struct super_block *sb = 
page->mapping->host->i_sb; +	struct btrfs_fs_info *fs_info = btrfs_sb(sb); +	btrfs_error(fs_info, -EIO, +		    "Error occured while writing out btree at %llu", start); +	return -EIO; +} +  static struct extent_io_ops btree_extent_io_ops = {  	.write_cache_pages_lock_hook = btree_lock_page_hook,  	.readpage_end_io_hook = btree_readpage_end_io_hook, @@ -3565,4 +3675,5 @@ static struct extent_io_ops btree_extent_io_ops = {  	.submit_bio_hook = btree_submit_bio_hook,  	/* note we're sharing with inode.c for the merge bio hook */  	.merge_bio_hook = btrfs_merge_bio_hook, +	.writepage_io_failed_hook = btree_writepage_io_failed_hook,  }; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index e4bc4741319..a7ace1a2dd1 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -44,8 +44,8 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,  			 int mirror_num, struct extent_buffer **eb);  struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,  						   u64 bytenr, u32 blocksize); -int clean_tree_block(struct btrfs_trans_handle *trans, -		     struct btrfs_root *root, struct extent_buffer *buf); +void clean_tree_block(struct btrfs_trans_handle *trans, +		      struct btrfs_root *root, struct extent_buffer *buf);  int open_ctree(struct super_block *sb,  	       struct btrfs_fs_devices *fs_devices,  	       char *options); @@ -64,7 +64,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,  int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);  void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);  void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); -int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); +void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);  void btrfs_mark_buffer_dirty(struct extent_buffer *buf);  int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);  int 
btrfs_set_buffer_uptodate(struct extent_buffer *buf); @@ -85,6 +85,10 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,  			     struct btrfs_fs_info *fs_info);  int btrfs_add_log_tree(struct btrfs_trans_handle *trans,  		       struct btrfs_root *root); +int btrfs_cleanup_transaction(struct btrfs_root *root); +void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans, +				  struct btrfs_root *root); +void btrfs_abort_devices(struct btrfs_root *root);  #ifdef CONFIG_DEBUG_LOCK_ALLOC  void btrfs_init_lockdep(void); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 5f77166fd01..e887ee62b6d 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)  	if (ret < 0)  		goto fail; -	BUG_ON(ret == 0); +	BUG_ON(ret == 0); /* Key with offset of -1 found */  	if (path->slots[0] == 0) {  		ret = -ENOENT;  		goto fail; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1b831ac4c07..8b304e3537c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -245,7 +245,7 @@ static int exclude_super_stripes(struct btrfs_root *root,  		cache->bytes_super += stripe_len;  		ret = add_excluded_extent(root, cache->key.objectid,  					  stripe_len); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { @@ -253,13 +253,13 @@ static int exclude_super_stripes(struct btrfs_root *root,  		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,  				       cache->key.objectid, bytenr,  				       0, &logical, &nr, &stripe_len); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		while (nr--) {  			cache->bytes_super += stripe_len;  			ret = add_excluded_extent(root, logical[nr],  						  stripe_len); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		}  		kfree(logical); @@ -321,7 +321,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,  			total_added += size;  			ret = 
btrfs_add_free_space(block_group, start,  						   size); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM or logic error */  			start = extent_end + 1;  		} else {  			break; @@ -332,7 +332,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,  		size = end - start;  		total_added += size;  		ret = btrfs_add_free_space(block_group, start, size); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM or logic error */  	}  	return total_added; @@ -474,7 +474,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,  	int ret = 0;  	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); -	BUG_ON(!caching_ctl); +	if (!caching_ctl) +		return -ENOMEM;  	INIT_LIST_HEAD(&caching_ctl->list);  	mutex_init(&caching_ctl->mutex); @@ -982,7 +983,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,  				ret = btrfs_next_leaf(root, path);  				if (ret < 0)  					return ret; -				BUG_ON(ret > 0); +				BUG_ON(ret > 0); /* Corruption */  				leaf = path->nodes[0];  			}  			btrfs_item_key_to_cpu(leaf, &found_key, @@ -1008,9 +1009,9 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,  				new_size + extra_size, 1);  	if (ret < 0)  		return ret; -	BUG_ON(ret); +	BUG_ON(ret); /* Corruption */ -	ret = btrfs_extend_item(trans, root, path, new_size); +	btrfs_extend_item(trans, root, path, new_size);  	leaf = path->nodes[0];  	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); @@ -1478,7 +1479,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,  		err = ret;  		goto out;  	} -	BUG_ON(ret); +	if (ret && !insert) { +		err = -ENOENT; +		goto out; +	} +	BUG_ON(ret); /* Corruption */  	leaf = path->nodes[0];  	item_size = btrfs_item_size_nr(leaf, path->slots[0]); @@ -1592,13 +1597,13 @@ out:   * helper to add new inline back ref   */  static noinline_for_stack -int setup_inline_extent_backref(struct btrfs_trans_handle *trans, -				struct btrfs_root *root, -				struct btrfs_path *path, -				
struct btrfs_extent_inline_ref *iref, -				u64 parent, u64 root_objectid, -				u64 owner, u64 offset, int refs_to_add, -				struct btrfs_delayed_extent_op *extent_op) +void setup_inline_extent_backref(struct btrfs_trans_handle *trans, +				 struct btrfs_root *root, +				 struct btrfs_path *path, +				 struct btrfs_extent_inline_ref *iref, +				 u64 parent, u64 root_objectid, +				 u64 owner, u64 offset, int refs_to_add, +				 struct btrfs_delayed_extent_op *extent_op)  {  	struct extent_buffer *leaf;  	struct btrfs_extent_item *ei; @@ -1608,7 +1613,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,  	u64 refs;  	int size;  	int type; -	int ret;  	leaf = path->nodes[0];  	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); @@ -1617,7 +1621,7 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,  	type = extent_ref_type(parent, owner);  	size = btrfs_extent_inline_ref_size(type); -	ret = btrfs_extend_item(trans, root, path, size); +	btrfs_extend_item(trans, root, path, size);  	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);  	refs = btrfs_extent_refs(leaf, ei); @@ -1652,7 +1656,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,  		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);  	}  	btrfs_mark_buffer_dirty(leaf); -	return 0;  }  static int lookup_extent_backref(struct btrfs_trans_handle *trans, @@ -1687,12 +1690,12 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,   * helper to update/remove inline back ref   */  static noinline_for_stack -int update_inline_extent_backref(struct btrfs_trans_handle *trans, -				 struct btrfs_root *root, -				 struct btrfs_path *path, -				 struct btrfs_extent_inline_ref *iref, -				 int refs_to_mod, -				 struct btrfs_delayed_extent_op *extent_op) +void update_inline_extent_backref(struct btrfs_trans_handle *trans, +				  struct btrfs_root *root, +				  struct btrfs_path *path, +				  struct 
btrfs_extent_inline_ref *iref, +				  int refs_to_mod, +				  struct btrfs_delayed_extent_op *extent_op)  {  	struct extent_buffer *leaf;  	struct btrfs_extent_item *ei; @@ -1703,7 +1706,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,  	u32 item_size;  	int size;  	int type; -	int ret;  	u64 refs;  	leaf = path->nodes[0]; @@ -1745,10 +1747,9 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,  			memmove_extent_buffer(leaf, ptr, ptr + size,  					      end - ptr - size);  		item_size -= size; -		ret = btrfs_truncate_item(trans, root, path, item_size, 1); +		btrfs_truncate_item(trans, root, path, item_size, 1);  	}  	btrfs_mark_buffer_dirty(leaf); -	return 0;  }  static noinline_for_stack @@ -1768,13 +1769,13 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,  					   root_objectid, owner, offset, 1);  	if (ret == 0) {  		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); -		ret = update_inline_extent_backref(trans, root, path, iref, -						   refs_to_add, extent_op); +		update_inline_extent_backref(trans, root, path, iref, +					     refs_to_add, extent_op);  	} else if (ret == -ENOENT) { -		ret = setup_inline_extent_backref(trans, root, path, iref, -						  parent, root_objectid, -						  owner, offset, refs_to_add, -						  extent_op); +		setup_inline_extent_backref(trans, root, path, iref, parent, +					    root_objectid, owner, offset, +					    refs_to_add, extent_op); +		ret = 0;  	}  	return ret;  } @@ -1804,12 +1805,12 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,  				 struct btrfs_extent_inline_ref *iref,  				 int refs_to_drop, int is_data)  { -	int ret; +	int ret = 0;  	BUG_ON(!is_data && refs_to_drop != 1);  	if (iref) { -		ret = update_inline_extent_backref(trans, root, path, iref, -						   -refs_to_drop, NULL); +		update_inline_extent_backref(trans, root, path, iref, +					     -refs_to_drop, NULL);  	} else if (is_data) {  		ret = remove_extent_data_ref(trans, root, path, 
refs_to_drop);  	} else { @@ -1835,6 +1836,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,  	/* Tell the block device(s) that the sectors can be discarded */  	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,  			      bytenr, &num_bytes, &bbio, 0); +	/* Error condition is -ENOMEM */  	if (!ret) {  		struct btrfs_bio_stripe *stripe = bbio->stripes;  		int i; @@ -1850,7 +1852,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,  			if (!ret)  				discarded_bytes += stripe->length;  			else if (ret != -EOPNOTSUPP) -				break; +				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */  			/*  			 * Just in case we get back EOPNOTSUPP for some reason, @@ -1869,6 +1871,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,  	return ret;  } +/* Can return -ENOMEM */  int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,  			 struct btrfs_root *root,  			 u64 bytenr, u64 num_bytes, u64 parent, @@ -1944,7 +1947,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,  	ret = insert_extent_backref(trans, root->fs_info->extent_root,  				    path, bytenr, parent, root_objectid,  				    owner, offset, refs_to_add); -	BUG_ON(ret); +	if (ret) +		btrfs_abort_transaction(trans, root, ret);  out:  	btrfs_free_path(path);  	return err; @@ -2031,6 +2035,9 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,  	int ret;  	int err = 0; +	if (trans->aborted) +		return 0; +  	path = btrfs_alloc_path();  	if (!path)  		return -ENOMEM; @@ -2128,7 +2135,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,  			       struct btrfs_delayed_extent_op *extent_op,  			       int insert_reserved)  { -	int ret; +	int ret = 0; + +	if (trans->aborted) +		return 0; +  	if (btrfs_delayed_ref_is_head(node)) {  		struct btrfs_delayed_ref_head *head;  		/* @@ -2146,11 +2157,10 @@ static int run_one_delayed_ref(struct 
btrfs_trans_handle *trans,  				ret = btrfs_del_csums(trans, root,  						      node->bytenr,  						      node->num_bytes); -				BUG_ON(ret);  			}  		}  		mutex_unlock(&head->mutex); -		return 0; +		return ret;  	}  	if (node->type == BTRFS_TREE_BLOCK_REF_KEY || @@ -2197,6 +2207,10 @@ again:  	return NULL;  } +/* + * Returns 0 on success or if called with an already aborted transaction. + * Returns -ENOMEM or -EIO on failure and will abort the transaction. + */  static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,  				       struct btrfs_root *root,  				       struct list_head *cluster) @@ -2285,9 +2299,13 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,  				ret = run_delayed_extent_op(trans, root,  							    ref, extent_op); -				BUG_ON(ret);  				kfree(extent_op); +				if (ret) { +					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret); +					return ret; +				} +  				goto next;  			} @@ -2308,11 +2326,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,  		ret = run_one_delayed_ref(trans, root, ref, extent_op,  					  must_insert_reserved); -		BUG_ON(ret);  		btrfs_put_delayed_ref(ref);  		kfree(extent_op);  		count++; + +		if (ret) { +			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret); +			return ret; +		} +  next:  		do_chunk_alloc(trans, root->fs_info->extent_root,  			       2 * 1024 * 1024, @@ -2347,6 +2370,9 @@ static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,   * 0, which means to process everything in the tree at the start   * of the run (but not newly added entries), or it can be some target   * number you'd like to process. 
+ * + * Returns 0 on success or if called with an aborted transaction + * Returns <0 on error and aborts the transaction   */  int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,  			   struct btrfs_root *root, unsigned long count) @@ -2362,6 +2388,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,  	unsigned long num_refs = 0;  	int consider_waiting; +	/* We'll clean this up in btrfs_cleanup_transaction */ +	if (trans->aborted) +		return 0; +  	if (root == root->fs_info->extent_root)  		root = root->fs_info->tree_root; @@ -2419,7 +2449,11 @@ again:  		}  		ret = run_clustered_refs(trans, root, &cluster); -		BUG_ON(ret < 0); +		if (ret < 0) { +			spin_unlock(&delayed_refs->lock); +			btrfs_abort_transaction(trans, root, ret); +			return ret; +		}  		count -= min_t(unsigned long, ret, count); @@ -2584,7 +2618,7 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,  	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);  	if (ret < 0)  		goto out; -	BUG_ON(ret == 0); +	BUG_ON(ret == 0); /* Corruption */  	ret = -ENOENT;  	if (path->slots[0] == 0) @@ -2738,7 +2772,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,  	}  	return 0;  fail: -	BUG();  	return ret;  } @@ -2767,7 +2800,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,  	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);  	if (ret < 0)  		goto fail; -	BUG_ON(ret); +	BUG_ON(ret); /* Corruption */  	leaf = path->nodes[0];  	bi = btrfs_item_ptr_offset(leaf, path->slots[0]); @@ -2775,8 +2808,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,  	btrfs_mark_buffer_dirty(leaf);  	btrfs_release_path(path);  fail: -	if (ret) +	if (ret) { +		btrfs_abort_transaction(trans, root, ret);  		return ret; +	}  	return 0;  } @@ -2949,7 +2984,8 @@ again:  		if (last == 0) {  			err = btrfs_run_delayed_refs(trans, root,  						     (unsigned long)-1); -			BUG_ON(err); +			if (err) /* File system 
offline */ +				goto out;  		}  		cache = btrfs_lookup_first_block_group(root->fs_info, last); @@ -2976,7 +3012,9 @@ again:  		last = cache->key.objectid + cache->key.offset;  		err = write_one_cache_group(trans, root, path, cache); -		BUG_ON(err); +		if (err) /* File system offline */ +			goto out; +  		btrfs_put_block_group(cache);  	} @@ -2989,7 +3027,8 @@ again:  		if (last == 0) {  			err = btrfs_run_delayed_refs(trans, root,  						     (unsigned long)-1); -			BUG_ON(err); +			if (err) /* File system offline */ +				goto out;  		}  		cache = btrfs_lookup_first_block_group(root->fs_info, last); @@ -3014,20 +3053,21 @@ again:  			continue;  		} -		btrfs_write_out_cache(root, trans, cache, path); +		err = btrfs_write_out_cache(root, trans, cache, path);  		/*  		 * If we didn't have an error then the cache state is still  		 * NEED_WRITE, so we can set it to WRITTEN.  		 */ -		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) +		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)  			cache->disk_cache_state = BTRFS_DC_WRITTEN;  		last = cache->key.objectid + cache->key.offset;  		btrfs_put_block_group(cache);  	} +out:  	btrfs_free_path(path); -	return 0; +	return err;  }  int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) @@ -3411,9 +3451,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,  	if (!space_info) {  		ret = update_space_info(extent_root->fs_info, flags,  					0, 0, &space_info); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	} -	BUG_ON(!space_info); +	BUG_ON(!space_info); /* Logic error */  again:  	spin_lock(&space_info->lock); @@ -3678,8 +3718,10 @@ again:  		ret = wait_event_interruptible(space_info->wait,  					       !space_info->flush);  		/* Must have been interrupted, return */ -		if (ret) +		if (ret) { +			printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);  			return -EINTR; +		}  		spin_lock(&space_info->lock);  	} @@ -3836,8 +3878,9 @@ out:  	return ret;  } -static struct btrfs_block_rsv 
*get_block_rsv(struct btrfs_trans_handle *trans, -					     struct btrfs_root *root) +static struct btrfs_block_rsv *get_block_rsv( +					const struct btrfs_trans_handle *trans, +					const struct btrfs_root *root)  {  	struct btrfs_block_rsv *block_rsv = NULL; @@ -4204,6 +4247,7 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,  	trans->bytes_reserved = 0;  } +/* Can only return 0 or -ENOSPC */  int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,  				  struct inode *inode)  { @@ -4540,7 +4584,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,  	while (total) {  		cache = btrfs_lookup_block_group(info, bytenr);  		if (!cache) -			return -1; +			return -ENOENT;  		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |  				    BTRFS_BLOCK_GROUP_RAID1 |  				    BTRFS_BLOCK_GROUP_RAID10)) @@ -4643,7 +4687,7 @@ int btrfs_pin_extent(struct btrfs_root *root,  	struct btrfs_block_group_cache *cache;  	cache = btrfs_lookup_block_group(root->fs_info, bytenr); -	BUG_ON(!cache); +	BUG_ON(!cache); /* Logic error */  	pin_down_extent(root, cache, bytenr, num_bytes, reserved); @@ -4661,7 +4705,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,  	struct btrfs_block_group_cache *cache;  	cache = btrfs_lookup_block_group(root->fs_info, bytenr); -	BUG_ON(!cache); +	BUG_ON(!cache); /* Logic error */  	/*  	 * pull in the free space cache (if any) so that our pin @@ -4706,6 +4750,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,  {  	struct btrfs_space_info *space_info = cache->space_info;  	int ret = 0; +  	spin_lock(&space_info->lock);  	spin_lock(&cache->lock);  	if (reserve != RESERVE_FREE) { @@ -4734,7 +4779,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,  	return ret;  } -int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, +void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,  				struct btrfs_root *root)  {  	
struct btrfs_fs_info *fs_info = root->fs_info; @@ -4764,7 +4809,6 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,  	up_write(&fs_info->extent_commit_sem);  	update_global_block_rsv(fs_info); -	return 0;  }  static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) @@ -4779,7 +4823,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)  			if (cache)  				btrfs_put_block_group(cache);  			cache = btrfs_lookup_block_group(fs_info, start); -			BUG_ON(!cache); +			BUG_ON(!cache); /* Logic error */  		}  		len = cache->key.objectid + cache->key.offset - start; @@ -4816,6 +4860,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,  	u64 end;  	int ret; +	if (trans->aborted) +		return 0; +  	if (fs_info->pinned_extents == &fs_info->freed_extents[0])  		unpin = &fs_info->freed_extents[1];  	else @@ -4901,7 +4948,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  			ret = remove_extent_backref(trans, extent_root, path,  						    NULL, refs_to_drop,  						    is_data); -			BUG_ON(ret); +			if (ret) +				goto abort;  			btrfs_release_path(path);  			path->leave_spinning = 1; @@ -4919,10 +4967,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  					btrfs_print_leaf(extent_root,  							 path->nodes[0]);  			} -			BUG_ON(ret); +			if (ret < 0) +				goto abort;  			extent_slot = path->slots[0];  		} -	} else { +	} else if (ret == -ENOENT) {  		btrfs_print_leaf(extent_root, path->nodes[0]);  		WARN_ON(1);  		printk(KERN_ERR "btrfs unable to find ref byte nr %llu " @@ -4932,6 +4981,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  		       (unsigned long long)root_objectid,  		       (unsigned long long)owner_objectid,  		       (unsigned long long)owner_offset); +	} else { +		goto abort;  	}  	leaf = path->nodes[0]; @@ -4941,7 +4992,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  		BUG_ON(found_extent || extent_slot 
!= path->slots[0]);  		ret = convert_extent_item_v0(trans, extent_root, path,  					     owner_objectid, 0); -		BUG_ON(ret < 0); +		if (ret < 0) +			goto abort;  		btrfs_release_path(path);  		path->leave_spinning = 1; @@ -4958,7 +5010,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  			       (unsigned long long)bytenr);  			btrfs_print_leaf(extent_root, path->nodes[0]);  		} -		BUG_ON(ret); +		if (ret < 0) +			goto abort;  		extent_slot = path->slots[0];  		leaf = path->nodes[0];  		item_size = btrfs_item_size_nr(leaf, extent_slot); @@ -4995,7 +5048,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  			ret = remove_extent_backref(trans, extent_root, path,  						    iref, refs_to_drop,  						    is_data); -			BUG_ON(ret); +			if (ret) +				goto abort;  		}  	} else {  		if (found_extent) { @@ -5012,19 +5066,27 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,  		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],  				      num_to_del); -		BUG_ON(ret); +		if (ret) +			goto abort;  		btrfs_release_path(path);  		if (is_data) {  			ret = btrfs_del_csums(trans, root, bytenr, num_bytes); -			BUG_ON(ret); +			if (ret) +				goto abort;  		}  		ret = update_block_group(trans, root, bytenr, num_bytes, 0); -		BUG_ON(ret); +		if (ret) +			goto abort;  	} +out:  	btrfs_free_path(path);  	return ret; + +abort: +	btrfs_abort_transaction(trans, extent_root, ret); +	goto out;  }  /* @@ -5120,7 +5182,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,  					parent, root->root_key.objectid,  					btrfs_header_level(buf),  					BTRFS_DROP_DELAYED_REF, NULL, for_cow); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	if (!last_ref) @@ -5154,6 +5216,7 @@ out:  	btrfs_put_block_group(cache);  } +/* Can return -ENOMEM */  int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,  		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,  		      u64 owner, u64 
offset, int for_cow) @@ -5175,14 +5238,12 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,  					num_bytes,  					parent, root_objectid, (int)owner,  					BTRFS_DROP_DELAYED_REF, NULL, for_cow); -		BUG_ON(ret);  	} else {  		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,  						num_bytes,  						parent, root_objectid, owner,  						offset, BTRFS_DROP_DELAYED_REF,  						NULL, for_cow); -		BUG_ON(ret);  	}  	return ret;  } @@ -5412,7 +5473,8 @@ have_block_group:  			found_uncached_bg = true;  			ret = cache_block_group(block_group, trans,  						orig_root, 0); -			BUG_ON(ret); +			BUG_ON(ret < 0); +			ret = 0;  		}  		if (unlikely(block_group->ro)) @@ -5631,6 +5693,11 @@ loop:  				ret = do_chunk_alloc(trans, root, num_bytes +  						     2 * 1024 * 1024, data,  						     CHUNK_ALLOC_LIMITED); +				if (ret < 0) { +					btrfs_abort_transaction(trans, +								root, ret); +					goto out; +				}  				allowed_chunk_alloc = 0;  				if (ret == 1)  					done_chunk_alloc = 1; @@ -5659,6 +5726,7 @@ loop:  	} else if (ins->objectid) {  		ret = 0;  	} +out:  	return ret;  } @@ -5723,10 +5791,15 @@ again:  	 * the only place that sets empty_size is btrfs_realloc_node, which  	 * is not called recursively on allocations  	 */ -	if (empty_size || root->ref_cows) +	if (empty_size || root->ref_cows) {  		ret = do_chunk_alloc(trans, root->fs_info->extent_root,  				     num_bytes + 2 * 1024 * 1024, data,  				     CHUNK_ALLOC_NO_FORCE); +		if (ret < 0 && ret != -ENOSPC) { +			btrfs_abort_transaction(trans, root, ret); +			return ret; +		} +	}  	WARN_ON(num_bytes < root->sectorsize);  	ret = find_free_extent(trans, root, num_bytes, empty_size, @@ -5737,8 +5810,12 @@ again:  			num_bytes = num_bytes >> 1;  			num_bytes = num_bytes & ~(root->sectorsize - 1);  			num_bytes = max(num_bytes, min_alloc_size); -			do_chunk_alloc(trans, root->fs_info->extent_root, +			ret = do_chunk_alloc(trans, root->fs_info->extent_root,  				       num_bytes, 
data, CHUNK_ALLOC_FORCE); +			if (ret < 0 && ret != -ENOSPC) { +				btrfs_abort_transaction(trans, root, ret); +				return ret; +			}  			if (num_bytes == min_alloc_size)  				final_tried = true;  			goto again; @@ -5749,7 +5826,8 @@ again:  			printk(KERN_ERR "btrfs allocation failed flags %llu, "  			       "wanted %llu\n", (unsigned long long)data,  			       (unsigned long long)num_bytes); -			dump_space_info(sinfo, num_bytes, 1); +			if (sinfo) +				dump_space_info(sinfo, num_bytes, 1);  		}  	} @@ -5828,7 +5906,10 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,  	path->leave_spinning = 1;  	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,  				      ins, size); -	BUG_ON(ret); +	if (ret) { +		btrfs_free_path(path); +		return ret; +	}  	leaf = path->nodes[0];  	extent_item = btrfs_item_ptr(leaf, path->slots[0], @@ -5858,7 +5939,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,  	btrfs_free_path(path);  	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); -	if (ret) { +	if (ret) { /* -ENOENT, logic error */  		printk(KERN_ERR "btrfs update block group failed for %llu "  		       "%llu\n", (unsigned long long)ins->objectid,  		       (unsigned long long)ins->offset); @@ -5889,7 +5970,10 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,  	path->leave_spinning = 1;  	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,  				      ins, size); -	BUG_ON(ret); +	if (ret) { +		btrfs_free_path(path); +		return ret; +	}  	leaf = path->nodes[0];  	extent_item = btrfs_item_ptr(leaf, path->slots[0], @@ -5919,7 +6003,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,  	btrfs_free_path(path);  	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); -	if (ret) { +	if (ret) { /* -ENOENT, logic error */  		printk(KERN_ERR "btrfs update block group failed for %llu "  		       "%llu\n", (unsigned long long)ins->objectid, 
 		       (unsigned long long)ins->offset); @@ -5967,28 +6051,28 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,  	if (!caching_ctl) {  		BUG_ON(!block_group_cache_done(block_group));  		ret = btrfs_remove_free_space(block_group, start, num_bytes); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	} else {  		mutex_lock(&caching_ctl->mutex);  		if (start >= caching_ctl->progress) {  			ret = add_excluded_extent(root, start, num_bytes); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		} else if (start + num_bytes <= caching_ctl->progress) {  			ret = btrfs_remove_free_space(block_group,  						      start, num_bytes); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		} else {  			num_bytes = caching_ctl->progress - start;  			ret = btrfs_remove_free_space(block_group,  						      start, num_bytes); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  			start = caching_ctl->progress;  			num_bytes = ins->objectid + ins->offset -  				    caching_ctl->progress;  			ret = add_excluded_extent(root, start, num_bytes); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		}  		mutex_unlock(&caching_ctl->mutex); @@ -5997,7 +6081,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,  	ret = btrfs_update_reserved_bytes(block_group, ins->offset,  					  RESERVE_ALLOC_NO_ACCOUNT); -	BUG_ON(ret); +	BUG_ON(ret); /* logic error */  	btrfs_put_block_group(block_group);  	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,  					 0, owner, offset, ins, 1); @@ -6134,7 +6218,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,  	buf = btrfs_init_new_buffer(trans, root, ins.objectid,  				    blocksize, level); -	BUG_ON(IS_ERR(buf)); +	BUG_ON(IS_ERR(buf)); /* -ENOMEM */  	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {  		if (parent == 0) @@ -6146,7 +6230,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,  	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {  		struct 
btrfs_delayed_extent_op *extent_op;  		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); -		BUG_ON(!extent_op); +		BUG_ON(!extent_op); /* -ENOMEM */  		if (key)  			memcpy(&extent_op->key, key, sizeof(extent_op->key));  		else @@ -6161,7 +6245,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,  					ins.offset, parent, root_objectid,  					level, BTRFS_ADD_DELAYED_EXTENT,  					extent_op, for_cow); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	return buf;  } @@ -6231,7 +6315,9 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,  		/* We don't lock the tree block, it's OK to be racy here */  		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,  					       &refs, &flags); -		BUG_ON(ret); +		/* We don't care about errors in readahead. */ +		if (ret < 0) +			continue;  		BUG_ON(refs == 0);  		if (wc->stage == DROP_REFERENCE) { @@ -6298,7 +6384,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,  					       eb->start, eb->len,  					       &wc->refs[level],  					       &wc->flags[level]); -		BUG_ON(ret); +		BUG_ON(ret == -ENOMEM); +		if (ret) +			return ret;  		BUG_ON(wc->refs[level] == 0);  	} @@ -6317,12 +6405,12 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,  	if (!(wc->flags[level] & flag)) {  		BUG_ON(!path->locks[level]);  		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,  						  eb->len, flag, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		wc->flags[level] |= flag;  	} @@ -6394,7 +6482,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,  	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,  				       &wc->refs[level - 1],  				       &wc->flags[level - 1]); -	BUG_ON(ret); +	if (ret < 0) 
{ +		btrfs_tree_unlock(next); +		return ret; +	} +  	BUG_ON(wc->refs[level - 1] == 0);  	*lookup_info = 0; @@ -6463,7 +6555,7 @@ skip:  		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,  				root->root_key.objectid, level - 1, 0, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	btrfs_tree_unlock(next);  	free_extent_buffer(next); @@ -6521,7 +6613,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,  						       eb->start, eb->len,  						       &wc->refs[level],  						       &wc->flags[level]); -			BUG_ON(ret); +			if (ret < 0) { +				btrfs_tree_unlock_rw(eb, path->locks[level]); +				return ret; +			}  			BUG_ON(wc->refs[level] == 0);  			if (wc->refs[level] == 1) {  				btrfs_tree_unlock_rw(eb, path->locks[level]); @@ -6541,7 +6636,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,  			else  				ret = btrfs_dec_ref(trans, root, eb, 0,  						    wc->for_reloc); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM */  		}  		/* make block locked assertion in clean_tree_block happy */  		if (!path->locks[level] && @@ -6650,7 +6745,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,   * also make sure backrefs for the shared block and all lower level   * blocks are properly updated.   
*/ -void btrfs_drop_snapshot(struct btrfs_root *root, +int btrfs_drop_snapshot(struct btrfs_root *root,  			 struct btrfs_block_rsv *block_rsv, int update_ref,  			 int for_reloc)  { @@ -6678,7 +6773,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,  	}  	trans = btrfs_start_transaction(tree_root, 0); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) { +		err = PTR_ERR(trans); +		goto out_free; +	}  	if (block_rsv)  		trans->block_rsv = block_rsv; @@ -6703,7 +6801,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,  		path->lowest_level = 0;  		if (ret < 0) {  			err = ret; -			goto out_free; +			goto out_end_trans;  		}  		WARN_ON(ret > 0); @@ -6723,7 +6821,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,  						path->nodes[level]->len,  						&wc->refs[level],  						&wc->flags[level]); -			BUG_ON(ret); +			if (ret < 0) { +				err = ret; +				goto out_end_trans; +			}  			BUG_ON(wc->refs[level] == 0);  			if (level == root_item->drop_level) @@ -6774,26 +6875,40 @@ void btrfs_drop_snapshot(struct btrfs_root *root,  			ret = btrfs_update_root(trans, tree_root,  						&root->root_key,  						root_item); -			BUG_ON(ret); +			if (ret) { +				btrfs_abort_transaction(trans, tree_root, ret); +				err = ret; +				goto out_end_trans; +			}  			btrfs_end_transaction_throttle(trans, tree_root);  			trans = btrfs_start_transaction(tree_root, 0); -			BUG_ON(IS_ERR(trans)); +			if (IS_ERR(trans)) { +				err = PTR_ERR(trans); +				goto out_free; +			}  			if (block_rsv)  				trans->block_rsv = block_rsv;  		}  	}  	btrfs_release_path(path); -	BUG_ON(err); +	if (err) +		goto out_end_trans;  	ret = btrfs_del_root(trans, tree_root, &root->root_key); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, tree_root, ret); +		goto out_end_trans; +	}  	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {  		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,  					   NULL, NULL); -		BUG_ON(ret < 0); -		if (ret > 0) { +		if (ret < 0) { +			
btrfs_abort_transaction(trans, tree_root, ret); +			err = ret; +			goto out_end_trans; +		} else if (ret > 0) {  			/* if we fail to delete the orphan item this time  			 * around, it'll get picked up the next time.  			 * @@ -6811,14 +6926,15 @@ void btrfs_drop_snapshot(struct btrfs_root *root,  		free_extent_buffer(root->commit_root);  		kfree(root);  	} -out_free: +out_end_trans:  	btrfs_end_transaction_throttle(trans, tree_root); +out_free:  	kfree(wc);  	btrfs_free_path(path);  out:  	if (err)  		btrfs_std_error(root->fs_info, err); -	return; +	return err;  }  /* @@ -7015,12 +7131,16 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,  	BUG_ON(cache->ro);  	trans = btrfs_join_transaction(root); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) +		return PTR_ERR(trans);  	alloc_flags = update_block_group_flags(root, cache->flags); -	if (alloc_flags != cache->flags) -		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, -			       CHUNK_ALLOC_FORCE); +	if (alloc_flags != cache->flags) { +		ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, +				     CHUNK_ALLOC_FORCE); +		if (ret < 0) +			goto out; +	}  	ret = set_block_group_ro(cache, 0);  	if (!ret) @@ -7100,7 +7220,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)  	return free_bytes;  } -int btrfs_set_block_group_rw(struct btrfs_root *root, +void btrfs_set_block_group_rw(struct btrfs_root *root,  			      struct btrfs_block_group_cache *cache)  {  	struct btrfs_space_info *sinfo = cache->space_info; @@ -7116,7 +7236,6 @@ int btrfs_set_block_group_rw(struct btrfs_root *root,  	cache->ro = 0;  	spin_unlock(&cache->lock);  	spin_unlock(&sinfo->lock); -	return 0;  }  /* @@ -7484,7 +7603,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)  		ret = update_space_info(info, cache->flags, found_key.offset,  					btrfs_block_group_used(&cache->item),  					&space_info); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		cache->space_info = space_info;  		
spin_lock(&cache->space_info->lock);  		cache->space_info->bytes_readonly += cache->bytes_super; @@ -7493,7 +7612,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)  		__link_block_group(space_info, cache);  		ret = btrfs_add_block_group_cache(root->fs_info, cache); -		BUG_ON(ret); +		BUG_ON(ret); /* Logic error */  		set_avail_alloc_bits(root->fs_info, cache->flags);  		if (btrfs_chunk_readonly(root, cache->key.objectid)) @@ -7575,7 +7694,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,  	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,  				&cache->space_info); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	update_global_block_rsv(root->fs_info);  	spin_lock(&cache->space_info->lock); @@ -7585,11 +7704,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,  	__link_block_group(cache->space_info, cache);  	ret = btrfs_add_block_group_cache(root->fs_info, cache); -	BUG_ON(ret); +	BUG_ON(ret); /* Logic error */  	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,  				sizeof(cache->item)); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, extent_root, ret); +		return ret; +	}  	set_avail_alloc_bits(extent_root->fs_info, type); @@ -7670,7 +7792,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,  	inode = lookup_free_space_inode(tree_root, block_group, path);  	if (!IS_ERR(inode)) {  		ret = btrfs_orphan_add(trans, inode); -		BUG_ON(ret); +		if (ret) { +			btrfs_add_delayed_iput(inode); +			goto out; +		}  		clear_nlink(inode);  		/* One for the block groups ref */  		spin_lock(&block_group->lock); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 49a368593a1..0c3ec003f27 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -55,6 +55,11 @@ struct extent_page_data {  };  static noinline void flush_write_bio(void *data); +static inline struct btrfs_fs_info * +tree_fs_info(struct extent_io_tree *tree) +{ +	return 
btrfs_sb(tree->mapping->host->i_sb); +}  int __init extent_io_init(void)  { @@ -139,6 +144,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)  #endif  	atomic_set(&state->refs, 1);  	init_waitqueue_head(&state->wq); +	trace_alloc_extent_state(state, mask, _RET_IP_);  	return state;  } @@ -156,6 +162,7 @@ void free_extent_state(struct extent_state *state)  		list_del(&state->leak_list);  		spin_unlock_irqrestore(&leak_lock, flags);  #endif +		trace_free_extent_state(state, _RET_IP_);  		kmem_cache_free(extent_state_cache, state);  	}  } @@ -442,6 +449,13 @@ alloc_extent_state_atomic(struct extent_state *prealloc)  	return prealloc;  } +void extent_io_tree_panic(struct extent_io_tree *tree, int err) +{ +	btrfs_panic(tree_fs_info(tree), err, "Locking error: " +		    "Extent tree was modified by another " +		    "thread while locked."); +} +  /*   * clear some bits on a range in the tree.  This may require splitting   * or inserting elements in the tree, so the gfp mask is used to @@ -452,8 +466,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)   *   * the range [start, end] is inclusive.   * - * This takes the tree lock, and returns < 0 on error, > 0 if any of the - * bits were already set, or zero if none of the bits were already set. + * This takes the tree lock, and returns 0 on success and < 0 on error.   
*/  int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,  		     int bits, int wake, int delete, @@ -467,7 +480,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,  	struct rb_node *node;  	u64 last_end;  	int err; -	int set = 0;  	int clear = 0;  	if (delete) @@ -545,12 +557,14 @@ hit_next:  		prealloc = alloc_extent_state_atomic(prealloc);  		BUG_ON(!prealloc);  		err = split_state(tree, state, prealloc, start); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err); +  		prealloc = NULL;  		if (err)  			goto out;  		if (state->end <= end) { -			set |= clear_state_bit(tree, state, &bits, wake); +			clear_state_bit(tree, state, &bits, wake);  			if (last_end == (u64)-1)  				goto out;  			start = last_end + 1; @@ -567,17 +581,19 @@ hit_next:  		prealloc = alloc_extent_state_atomic(prealloc);  		BUG_ON(!prealloc);  		err = split_state(tree, state, prealloc, end + 1); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err); +  		if (wake)  			wake_up(&state->wq); -		set |= clear_state_bit(tree, prealloc, &bits, wake); +		clear_state_bit(tree, prealloc, &bits, wake);  		prealloc = NULL;  		goto out;  	} -	set |= clear_state_bit(tree, state, &bits, wake); +	clear_state_bit(tree, state, &bits, wake);  next:  	if (last_end == (u64)-1)  		goto out; @@ -594,7 +610,7 @@ out:  	if (prealloc)  		free_extent_state(prealloc); -	return set; +	return 0;  search_again:  	if (start > end) @@ -605,8 +621,8 @@ search_again:  	goto again;  } -static int wait_on_state(struct extent_io_tree *tree, -			 struct extent_state *state) +static void wait_on_state(struct extent_io_tree *tree, +			  struct extent_state *state)  		__releases(tree->lock)  		__acquires(tree->lock)  { @@ -616,7 +632,6 @@ static int wait_on_state(struct extent_io_tree *tree,  	schedule();  	spin_lock(&tree->lock);  	finish_wait(&state->wq, &wait); -	return 0;  }  /* @@ -624,7 +639,7 @@ static int wait_on_state(struct extent_io_tree *tree, 
  * The range [start, end] is inclusive.   * The tree lock is taken by this function   */ -int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) +void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)  {  	struct extent_state *state;  	struct rb_node *node; @@ -661,7 +676,6 @@ again:  	}  out:  	spin_unlock(&tree->lock); -	return 0;  }  static void set_state_bits(struct extent_io_tree *tree, @@ -709,9 +723,10 @@ static void uncache_state(struct extent_state **cached_ptr)   * [start, end] is inclusive This takes the tree lock.   */ -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, -		   int bits, int exclusive_bits, u64 *failed_start, -		   struct extent_state **cached_state, gfp_t mask) +static int __must_check +__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, +		 int bits, int exclusive_bits, u64 *failed_start, +		 struct extent_state **cached_state, gfp_t mask)  {  	struct extent_state *state;  	struct extent_state *prealloc = NULL; @@ -745,8 +760,10 @@ again:  		prealloc = alloc_extent_state_atomic(prealloc);  		BUG_ON(!prealloc);  		err = insert_state(tree, prealloc, start, end, &bits); +		if (err) +			extent_io_tree_panic(tree, err); +  		prealloc = NULL; -		BUG_ON(err == -EEXIST);  		goto out;  	}  	state = rb_entry(node, struct extent_state, rb_node); @@ -812,7 +829,9 @@ hit_next:  		prealloc = alloc_extent_state_atomic(prealloc);  		BUG_ON(!prealloc);  		err = split_state(tree, state, prealloc, start); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err); +  		prealloc = NULL;  		if (err)  			goto out; @@ -849,12 +868,9 @@ hit_next:  		 */  		err = insert_state(tree, prealloc, start, this_end,  				   &bits); -		BUG_ON(err == -EEXIST); -		if (err) { -			free_extent_state(prealloc); -			prealloc = NULL; -			goto out; -		} +		if (err) +			extent_io_tree_panic(tree, err); +  		cache_state(prealloc, cached_state);  		prealloc = NULL;  		start = this_end + 
1; @@ -876,7 +892,8 @@ hit_next:  		prealloc = alloc_extent_state_atomic(prealloc);  		BUG_ON(!prealloc);  		err = split_state(tree, state, prealloc, end + 1); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err);  		set_state_bits(tree, prealloc, &bits);  		cache_state(prealloc, cached_state); @@ -903,6 +920,15 @@ search_again:  	goto again;  } +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, +		   u64 *failed_start, struct extent_state **cached_state, +		   gfp_t mask) +{ +	return __set_extent_bit(tree, start, end, bits, 0, failed_start, +				cached_state, mask); +} + +  /**   * convert_extent - convert all bits in a given range from one bit to another   * @tree:	the io tree to search @@ -949,7 +975,8 @@ again:  		}  		err = insert_state(tree, prealloc, start, end, &bits);  		prealloc = NULL; -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err);  		goto out;  	}  	state = rb_entry(node, struct extent_state, rb_node); @@ -1005,7 +1032,8 @@ hit_next:  			goto out;  		}  		err = split_state(tree, state, prealloc, start); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err);  		prealloc = NULL;  		if (err)  			goto out; @@ -1044,12 +1072,8 @@ hit_next:  		 */  		err = insert_state(tree, prealloc, start, this_end,  				   &bits); -		BUG_ON(err == -EEXIST); -		if (err) { -			free_extent_state(prealloc); -			prealloc = NULL; -			goto out; -		} +		if (err) +			extent_io_tree_panic(tree, err);  		prealloc = NULL;  		start = this_end + 1;  		goto search_again; @@ -1068,7 +1092,8 @@ hit_next:  		}  		err = split_state(tree, state, prealloc, end + 1); -		BUG_ON(err == -EEXIST); +		if (err) +			extent_io_tree_panic(tree, err);  		set_state_bits(tree, prealloc, &bits);  		clear_state_bit(tree, prealloc, &clear_bits, 0); @@ -1098,14 +1123,14 @@ search_again:  int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,  		     gfp_t mask)  { -	return set_extent_bit(tree, 
start, end, EXTENT_DIRTY, 0, NULL, +	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,  			      NULL, mask);  }  int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,  		    int bits, gfp_t mask)  { -	return set_extent_bit(tree, start, end, bits, 0, NULL, +	return set_extent_bit(tree, start, end, bits, NULL,  			      NULL, mask);  } @@ -1120,7 +1145,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,  {  	return set_extent_bit(tree, start, end,  			      EXTENT_DELALLOC | EXTENT_UPTODATE, -			      0, NULL, cached_state, mask); +			      NULL, cached_state, mask);  }  int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, @@ -1134,7 +1159,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,  int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,  		     gfp_t mask)  { -	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, +	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,  			      NULL, mask);  } @@ -1142,7 +1167,7 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,  			struct extent_state **cached_state, gfp_t mask)  {  	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, -			      NULL, cached_state, mask); +			      cached_state, mask);  }  static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, @@ -1158,42 +1183,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,   * us if waiting is desired.   
*/  int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, -		     int bits, struct extent_state **cached_state, gfp_t mask) +		     int bits, struct extent_state **cached_state)  {  	int err;  	u64 failed_start;  	while (1) { -		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, -				     EXTENT_LOCKED, &failed_start, -				     cached_state, mask); -		if (err == -EEXIST && (mask & __GFP_WAIT)) { +		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, +				       EXTENT_LOCKED, &failed_start, +				       cached_state, GFP_NOFS); +		if (err == -EEXIST) {  			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);  			start = failed_start; -		} else { +		} else  			break; -		}  		WARN_ON(start > end);  	}  	return err;  } -int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) +int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)  { -	return lock_extent_bits(tree, start, end, 0, NULL, mask); +	return lock_extent_bits(tree, start, end, 0, NULL);  } -int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, -		    gfp_t mask) +int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)  {  	int err;  	u64 failed_start; -	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, -			     &failed_start, NULL, mask); +	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, +			       &failed_start, NULL, GFP_NOFS);  	if (err == -EEXIST) {  		if (failed_start > start)  			clear_extent_bit(tree, start, failed_start - 1, -					 EXTENT_LOCKED, 1, 0, NULL, mask); +					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);  		return 0;  	}  	return 1; @@ -1206,10 +1229,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,  				mask);  } -int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)  {  	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, 
NULL, -				mask); +				GFP_NOFS);  }  /* @@ -1223,7 +1246,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)  	while (index <= end_index) {  		page = find_get_page(tree->mapping, index); -		BUG_ON(!page); +		BUG_ON(!page); /* Pages should be in the extent_io_tree */  		set_page_writeback(page);  		page_cache_release(page);  		index++; @@ -1346,9 +1369,9 @@ out:  	return found;  } -static noinline int __unlock_for_delalloc(struct inode *inode, -					  struct page *locked_page, -					  u64 start, u64 end) +static noinline void __unlock_for_delalloc(struct inode *inode, +					   struct page *locked_page, +					   u64 start, u64 end)  {  	int ret;  	struct page *pages[16]; @@ -1358,7 +1381,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,  	int i;  	if (index == locked_page->index && end_index == index) -		return 0; +		return;  	while (nr_pages > 0) {  		ret = find_get_pages_contig(inode->i_mapping, index, @@ -1373,7 +1396,6 @@ static noinline int __unlock_for_delalloc(struct inode *inode,  		index += ret;  		cond_resched();  	} -	return 0;  }  static noinline int lock_delalloc_pages(struct inode *inode, @@ -1503,11 +1525,10 @@ again:  			goto out_failed;  		}  	} -	BUG_ON(ret); +	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */  	/* step three, lock the state bits for the whole range */ -	lock_extent_bits(tree, delalloc_start, delalloc_end, -			 0, &cached_state, GFP_NOFS); +	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);  	/* then test to make sure it is all still delalloc */  	ret = test_range_bit(tree, delalloc_start, delalloc_end, @@ -1764,39 +1785,34 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,   * helper function to set a given page up to date if all the   * extents in the tree for that page are up to date   */ -static int check_page_uptodate(struct extent_io_tree *tree, -			       struct page *page) +static void check_page_uptodate(struct extent_io_tree 
*tree, struct page *page)  {  	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;  	u64 end = start + PAGE_CACHE_SIZE - 1;  	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))  		SetPageUptodate(page); -	return 0;  }  /*   * helper function to unlock a page if all the extents in the tree   * for that page are unlocked   */ -static int check_page_locked(struct extent_io_tree *tree, -			     struct page *page) +static void check_page_locked(struct extent_io_tree *tree, struct page *page)  {  	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;  	u64 end = start + PAGE_CACHE_SIZE - 1;  	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))  		unlock_page(page); -	return 0;  }  /*   * helper function to end page writeback if all the extents   * in the tree for that page are done with writeback   */ -static int check_page_writeback(struct extent_io_tree *tree, -			     struct page *page) +static void check_page_writeback(struct extent_io_tree *tree, +				 struct page *page)  {  	end_page_writeback(page); -	return 0;  }  /* @@ -2409,8 +2425,12 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,  	return bio;  } -static int submit_one_bio(int rw, struct bio *bio, int mirror_num, -			  unsigned long bio_flags) +/* + * Since writes are async, they will only return -ENOMEM. + * Reads can return the full range of I/O error conditions. 
+ */ +static int __must_check submit_one_bio(int rw, struct bio *bio, +				       int mirror_num, unsigned long bio_flags)  {  	int ret = 0;  	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -2436,6 +2456,19 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,  	return ret;  } +static int merge_bio(struct extent_io_tree *tree, struct page *page, +		     unsigned long offset, size_t size, struct bio *bio, +		     unsigned long bio_flags) +{ +	int ret = 0; +	if (tree->ops && tree->ops->merge_bio_hook) +		ret = tree->ops->merge_bio_hook(page, offset, size, bio, +						bio_flags); +	BUG_ON(ret < 0); +	return ret; + +} +  static int submit_extent_page(int rw, struct extent_io_tree *tree,  			      struct page *page, sector_t sector,  			      size_t size, unsigned long offset, @@ -2464,12 +2497,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,  				sector;  		if (prev_bio_flags != bio_flags || !contig || -		    (tree->ops && tree->ops->merge_bio_hook && -		     tree->ops->merge_bio_hook(page, offset, page_size, bio, -					       bio_flags)) || +		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||  		    bio_add_page(bio, page, page_size, offset) < page_size) {  			ret = submit_one_bio(rw, bio, mirror_num,  					     prev_bio_flags); +			if (ret < 0) +				return ret;  			bio = NULL;  		} else {  			return 0; @@ -2520,6 +2553,7 @@ void set_page_extent_mapped(struct page *page)   * basic readpage implementation.  
Locked extent state structs are inserted   * into the tree that are removed when the IO is done (by the end_io   * handlers) + * XXX JDM: This needs looking at to ensure proper page locking   */  static int __extent_read_full_page(struct extent_io_tree *tree,  				   struct page *page, @@ -2559,11 +2593,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,  	end = page_end;  	while (1) { -		lock_extent(tree, start, end, GFP_NOFS); +		lock_extent(tree, start, end);  		ordered = btrfs_lookup_ordered_extent(inode, start);  		if (!ordered)  			break; -		unlock_extent(tree, start, end, GFP_NOFS); +		unlock_extent(tree, start, end);  		btrfs_start_ordered_extent(inode, ordered, 1);  		btrfs_put_ordered_extent(ordered);  	} @@ -2600,7 +2634,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,  				end - cur + 1, 0);  		if (IS_ERR_OR_NULL(em)) {  			SetPageError(page); -			unlock_extent(tree, cur, end, GFP_NOFS); +			unlock_extent(tree, cur, end);  			break;  		}  		extent_offset = cur - em->start; @@ -2652,7 +2686,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,  		if (test_range_bit(tree, cur, cur_end,  				   EXTENT_UPTODATE, 1, NULL)) {  			check_page_uptodate(tree, page); -			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); +			unlock_extent(tree, cur, cur + iosize - 1);  			cur = cur + iosize;  			pg_offset += iosize;  			continue; @@ -2662,7 +2696,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,  		 */  		if (block_start == EXTENT_MAP_INLINE) {  			SetPageError(page); -			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); +			unlock_extent(tree, cur, cur + iosize - 1);  			cur = cur + iosize;  			pg_offset += iosize;  			continue; @@ -2682,6 +2716,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,  					 end_bio_extent_readpage, mirror_num,  					 *bio_flags,  					 this_bio_flag); +			BUG_ON(ret == -ENOMEM);  			nr++;  			*bio_flags = this_bio_flag;  		} @@ -2823,7 
+2858,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,  						       delalloc_end,  						       &page_started,  						       &nr_written); -			BUG_ON(ret); +			/* File system has been set read-only */ +			if (ret) { +				SetPageError(page); +				goto done; +			}  			/*  			 * delalloc_end is already one less than the total  			 * length, so we don't subtract one from @@ -3396,10 +3435,14 @@ retry:  static void flush_epd_write_bio(struct extent_page_data *epd)  {  	if (epd->bio) { +		int rw = WRITE; +		int ret; +  		if (epd->sync_io) -			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); -		else -			submit_one_bio(WRITE, epd->bio, 0, 0); +			rw = WRITE_SYNC; + +		ret = submit_one_bio(rw, epd->bio, 0, 0); +		BUG_ON(ret < 0); /* -ENOMEM */  		epd->bio = NULL;  	}  } @@ -3516,7 +3559,7 @@ int extent_readpages(struct extent_io_tree *tree,  	}  	BUG_ON(!list_empty(pages));  	if (bio) -		submit_one_bio(READ, bio, 0, bio_flags); +		return submit_one_bio(READ, bio, 0, bio_flags);  	return 0;  } @@ -3537,7 +3580,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,  	if (start > end)  		return 0; -	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); +	lock_extent_bits(tree, start, end, 0, &cached_state);  	wait_on_page_writeback(page);  	clear_extent_bit(tree, start, end,  			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -3751,7 +3794,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,  	}  	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, -			 &cached_state, GFP_NOFS); +			 &cached_state);  	em = get_extent_skip_holes(inode, start, last_for_get_extent,  				   get_extent); @@ -4239,14 +4282,13 @@ void free_extent_buffer_stale(struct extent_buffer *eb)  	release_extent_buffer(eb, GFP_NOFS);  } -int clear_extent_buffer_dirty(struct extent_buffer *eb) +void clear_extent_buffer_dirty(struct extent_buffer *eb)  {  	unsigned long i;  	unsigned long num_pages;  	struct page *page;  
	num_pages = num_extent_pages(eb->start, eb->len); -	WARN_ON(atomic_read(&eb->refs) == 0);  	for (i = 0; i < num_pages; i++) {  		page = extent_buffer_page(eb, i); @@ -4268,7 +4310,6 @@ int clear_extent_buffer_dirty(struct extent_buffer *eb)  		unlock_page(page);  	}  	WARN_ON(atomic_read(&eb->refs) == 0); -	return 0;  }  int set_extent_buffer_dirty(struct extent_buffer *eb) @@ -4433,8 +4474,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,  		}  	} -	if (bio) -		submit_one_bio(READ, bio, mirror_num, bio_flags); +	if (bio) { +		err = submit_one_bio(READ, bio, mirror_num, bio_flags); +		if (err) +			return err; +	}  	if (ret || wait != WAIT_COMPLETE)  		return ret; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 38c1af7092f..faf10eb57f7 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -194,14 +194,13 @@ int try_release_extent_buffer(struct page *page, gfp_t mask);  int try_release_extent_state(struct extent_map_tree *map,  			     struct extent_io_tree *tree, struct page *page,  			     gfp_t mask); -int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);  int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, -		     int bits, struct extent_state **cached, gfp_t mask); -int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +		     int bits, struct extent_state **cached); +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);  int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,  			 struct extent_state **cached, gfp_t mask); -int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, -		    gfp_t mask); +int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);  int extent_read_full_page(struct extent_io_tree *tree, struct page *page,  			  get_extent_t *get_extent, int mirror_num);  int __init extent_io_init(void); @@ -222,7 +221,7 @@ 
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,  int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,  		    int bits, gfp_t mask);  int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, -		   int bits, int exclusive_bits, u64 *failed_start, +		   int bits, u64 *failed_start,  		   struct extent_state **cached_state, gfp_t mask);  int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,  			struct extent_state **cached_state, gfp_t mask); @@ -301,8 +300,8 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,  			   unsigned long src_offset, unsigned long len);  void memset_extent_buffer(struct extent_buffer *eb, char c,  			  unsigned long start, unsigned long len); -int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); -int clear_extent_buffer_dirty(struct extent_buffer *eb); +void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); +void clear_extent_buffer_dirty(struct extent_buffer *eb);  int set_extent_buffer_dirty(struct extent_buffer *eb);  int set_extent_buffer_uptodate(struct extent_buffer *eb);  int clear_extent_buffer_uptodate(struct extent_buffer *eb); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index c7fb3a4247d..a14dbca5974 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -59,7 +59,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,  				      sizeof(*item));  	if (ret < 0)  		goto out; -	BUG_ON(ret); +	BUG_ON(ret); /* Can't happen */  	leaf = path->nodes[0];  	item = btrfs_item_ptr(leaf, path->slots[0],  			      struct btrfs_file_extent_item); @@ -284,6 +284,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,  	struct btrfs_ordered_sum *sums;  	struct btrfs_sector_sum *sector_sum;  	struct btrfs_csum_item *item; +	LIST_HEAD(tmplist);  	unsigned long offset;  	int ret;  	size_t size; @@ -358,7 +359,10 @@ int 
btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,  					MAX_ORDERED_SUM_BYTES(root));  			sums = kzalloc(btrfs_ordered_sum_size(root, size),  					GFP_NOFS); -			BUG_ON(!sums); +			if (!sums) { +				ret = -ENOMEM; +				goto fail; +			}  			sector_sum = sums->sums;  			sums->bytenr = start; @@ -380,12 +384,19 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,  				offset += csum_size;  				sector_sum++;  			} -			list_add_tail(&sums->list, list); +			list_add_tail(&sums->list, &tmplist);  		}  		path->slots[0]++;  	}  	ret = 0;  fail: +	while (ret < 0 && !list_empty(&tmplist)) { +		sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list); +		list_del(&sums->list); +		kfree(sums); +	} +	list_splice_tail(&tmplist, list); +  	btrfs_free_path(path);  	return ret;  } @@ -420,7 +431,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,  		offset = page_offset(bvec->bv_page) + bvec->bv_offset;  	ordered = btrfs_lookup_ordered_extent(inode, offset); -	BUG_ON(!ordered); +	BUG_ON(!ordered); /* Logic error */  	sums->bytenr = ordered->start;  	while (bio_index < bio->bi_vcnt) { @@ -439,11 +450,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,  			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),  				       GFP_NOFS); -			BUG_ON(!sums); +			BUG_ON(!sums); /* -ENOMEM */  			sector_sum = sums->sums;  			sums->len = bytes_left;  			ordered = btrfs_lookup_ordered_extent(inode, offset); -			BUG_ON(!ordered); +			BUG_ON(!ordered); /* Logic error */  			sums->bytenr = ordered->start;  		} @@ -483,18 +494,17 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,   * This calls btrfs_truncate_item with the correct args based on the   * overlap, and fixes up the key as required.   
*/ -static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, -				      struct btrfs_root *root, -				      struct btrfs_path *path, -				      struct btrfs_key *key, -				      u64 bytenr, u64 len) +static noinline void truncate_one_csum(struct btrfs_trans_handle *trans, +				       struct btrfs_root *root, +				       struct btrfs_path *path, +				       struct btrfs_key *key, +				       u64 bytenr, u64 len)  {  	struct extent_buffer *leaf;  	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);  	u64 csum_end;  	u64 end_byte = bytenr + len;  	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; -	int ret;  	leaf = path->nodes[0];  	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; @@ -510,7 +520,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,  		 */  		u32 new_size = (bytenr - key->offset) >> blocksize_bits;  		new_size *= csum_size; -		ret = btrfs_truncate_item(trans, root, path, new_size, 1); +		btrfs_truncate_item(trans, root, path, new_size, 1);  	} else if (key->offset >= bytenr && csum_end > end_byte &&  		   end_byte > key->offset) {  		/* @@ -522,15 +532,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,  		u32 new_size = (csum_end - end_byte) >> blocksize_bits;  		new_size *= csum_size; -		ret = btrfs_truncate_item(trans, root, path, new_size, 0); +		btrfs_truncate_item(trans, root, path, new_size, 0);  		key->offset = end_byte; -		ret = btrfs_set_item_key_safe(trans, root, path, key); -		BUG_ON(ret); +		btrfs_set_item_key_safe(trans, root, path, key);  	} else {  		BUG();  	} -	return 0;  }  /* @@ -635,13 +643,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,  			 * item changed size or key  			 */  			ret = btrfs_split_item(trans, root, path, &key, offset); -			BUG_ON(ret && ret != -EAGAIN); +			if (ret && ret != -EAGAIN) { +				btrfs_abort_transaction(trans, root, ret); +				goto out; +			}  			key.offset = end_byte - 1;  		} else { -			
ret = truncate_one_csum(trans, root, path, -						&key, bytenr, len); -			BUG_ON(ret); +			truncate_one_csum(trans, root, path, &key, bytenr, len);  			if (key.offset < bytenr)  				break;  		} @@ -772,7 +781,7 @@ again:  		if (diff != csum_size)  			goto insert; -		ret = btrfs_extend_item(trans, root, path, diff); +		btrfs_extend_item(trans, root, path, diff);  		goto csum;  	} diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e8d06b6b919..d83260d7498 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -452,7 +452,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,  			split = alloc_extent_map();  		if (!split2)  			split2 = alloc_extent_map(); -		BUG_ON(!split || !split2); +		BUG_ON(!split || !split2); /* -ENOMEM */  		write_lock(&em_tree->lock);  		em = lookup_extent_mapping(em_tree, start, len); @@ -494,7 +494,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,  			split->flags = flags;  			split->compress_type = em->compress_type;  			ret = add_extent_mapping(em_tree, split); -			BUG_ON(ret); +			BUG_ON(ret); /* Logic error */  			free_extent_map(split);  			split = split2;  			split2 = NULL; @@ -520,7 +520,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,  			}  			ret = add_extent_mapping(em_tree, split); -			BUG_ON(ret); +			BUG_ON(ret); /* Logic error */  			free_extent_map(split);  			split = NULL;  		} @@ -679,7 +679,7 @@ next_slot:  						root->root_key.objectid,  						new_key.objectid,  						start - extent_offset, 0); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  				*hint_byte = disk_bytenr;  			}  			key.offset = start; @@ -754,7 +754,7 @@ next_slot:  						root->root_key.objectid,  						key.objectid, key.offset -  						extent_offset, 0); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  				inode_sub_bytes(inode,  						extent_end - key.offset);  				*hint_byte = disk_bytenr; @@ -770,7 +770,10 @@ next_slot:  			ret = btrfs_del_items(trans, root, path, del_slot,  					 
     del_nr); -			BUG_ON(ret); +			if (ret) { +				btrfs_abort_transaction(trans, root, ret); +				goto out; +			}  			del_nr = 0;  			del_slot = 0; @@ -782,11 +785,13 @@ next_slot:  		BUG_ON(1);  	} -	if (del_nr > 0) { +	if (!ret && del_nr > 0) {  		ret = btrfs_del_items(trans, root, path, del_slot, del_nr); -		BUG_ON(ret); +		if (ret) +			btrfs_abort_transaction(trans, root, ret);  	} +out:  	btrfs_free_path(path);  	return ret;  } @@ -944,7 +949,10 @@ again:  			btrfs_release_path(path);  			goto again;  		} -		BUG_ON(ret < 0); +		if (ret < 0) { +			btrfs_abort_transaction(trans, root, ret); +			goto out; +		}  		leaf = path->nodes[0];  		fi = btrfs_item_ptr(leaf, path->slots[0] - 1, @@ -963,7 +971,7 @@ again:  		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,  					   root->root_key.objectid,  					   ino, orig_offset, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		if (split == start) {  			key.offset = start; @@ -990,7 +998,7 @@ again:  		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,  					0, root->root_key.objectid,  					ino, orig_offset, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	other_start = 0;  	other_end = start; @@ -1007,7 +1015,7 @@ again:  		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,  					0, root->root_key.objectid,  					ino, orig_offset, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	if (del_nr == 0) {  		fi = btrfs_item_ptr(leaf, path->slots[0], @@ -1025,7 +1033,10 @@ again:  		btrfs_mark_buffer_dirty(leaf);  		ret = btrfs_del_items(trans, root, path, del_slot, del_nr); -		BUG_ON(ret); +		if (ret < 0) { +			btrfs_abort_transaction(trans, root, ret); +			goto out; +		}  	}  out:  	btrfs_free_path(path); @@ -1105,8 +1116,7 @@ again:  	if (start_pos < inode->i_size) {  		struct btrfs_ordered_extent *ordered;  		lock_extent_bits(&BTRFS_I(inode)->io_tree, -				 start_pos, last_pos - 1, 0, &cached_state, -				 GFP_NOFS); +				 start_pos, last_pos - 1, 0, &cached_state);  		ordered = 
btrfs_lookup_first_ordered_extent(inode,  							    last_pos - 1);  		if (ordered && @@ -1638,7 +1648,7 @@ static long btrfs_fallocate(struct file *file, int mode,  		 * transaction  		 */  		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, -				 locked_end, 0, &cached_state, GFP_NOFS); +				 locked_end, 0, &cached_state);  		ordered = btrfs_lookup_first_ordered_extent(inode,  							    alloc_end - 1);  		if (ordered && @@ -1667,7 +1677,13 @@ static long btrfs_fallocate(struct file *file, int mode,  		em = btrfs_get_extent(inode, NULL, 0, cur_offset,  				      alloc_end - cur_offset, 0); -		BUG_ON(IS_ERR_OR_NULL(em)); +		if (IS_ERR_OR_NULL(em)) { +			if (!em) +				ret = -ENOMEM; +			else +				ret = PTR_ERR(em); +			break; +		}  		last_byte = min(extent_map_end(em), alloc_end);  		actual_end = min_t(u64, extent_map_end(em), offset + len);  		last_byte = (last_byte + mask) & ~mask; @@ -1737,7 +1753,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)  		return -ENXIO;  	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, -			 &cached_state, GFP_NOFS); +			 &cached_state);  	/*  	 * Delalloc is such a pain.  
If we have a hole and we have pending diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 710ea380c7e..054707ed579 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -230,11 +230,13 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,  	if (ret) {  		trans->block_rsv = rsv; -		WARN_ON(1); +		btrfs_abort_transaction(trans, root, ret);  		return ret;  	}  	ret = btrfs_update_inode(trans, root, inode); +	if (ret) +		btrfs_abort_transaction(trans, root, ret);  	trans->block_rsv = rsv;  	return ret; @@ -869,7 +871,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,  	io_ctl_prepare_pages(&io_ctl, inode, 0);  	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, -			 0, &cached_state, GFP_NOFS); +			 0, &cached_state);  	node = rb_first(&ctl->free_space_offset);  	if (!node && cluster) { @@ -1948,14 +1950,14 @@ again:  		 */  		ret = btrfs_add_free_space(block_group, old_start,  					   offset - old_start); -		WARN_ON(ret); +		WARN_ON(ret); /* -ENOMEM */  		goto out;  	}  	ret = remove_from_bitmap(ctl, info, &offset, &bytes);  	if (ret == -EAGAIN)  		goto again; -	BUG_ON(ret); +	BUG_ON(ret); /* logic error */  out_lock:  	spin_unlock(&ctl->tree_lock);  out: @@ -2346,7 +2348,7 @@ again:  	rb_erase(&entry->offset_index, &ctl->free_space_offset);  	ret = tree_insert_offset(&cluster->root, entry->offset,  				 &entry->offset_index, 1); -	BUG_ON(ret); +	BUG_ON(ret); /* -EEXIST; Logic error */  	trace_btrfs_setup_cluster(block_group, cluster,  				  total_found * block_group->sectorsize, 1); @@ -2439,7 +2441,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,  		ret = tree_insert_offset(&cluster->root, entry->offset,  					 &entry->offset_index, 0);  		total_size += entry->bytes; -		BUG_ON(ret); +		BUG_ON(ret); /* -EEXIST; Logic error */  	} while (node && entry != last);  	cluster->max_size = max_extent; @@ -2830,6 +2832,7 @@ u64 
btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)  		int ret;  		ret = search_bitmap(ctl, entry, &offset, &count); +		/* Logic error; Should be empty if it can't find anything */  		BUG_ON(ret);  		ino = offset; diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 6ea71c60e80..a13cf1a96c7 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -129,13 +129,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,  	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);  	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,  			      item_size - (ptr + sub_item_len - item_start)); -	ret = btrfs_truncate_item(trans, root, path, +	btrfs_truncate_item(trans, root, path,  				  item_size - sub_item_len, 1);  out:  	btrfs_free_path(path);  	return ret;  } +/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */  int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,  			   struct btrfs_root *root,  			   const char *name, int name_len, @@ -166,7 +167,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,  			goto out;  		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); -		ret = btrfs_extend_item(trans, root, path, ins_len); +		btrfs_extend_item(trans, root, path, ins_len);  		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],  				     struct btrfs_inode_ref);  		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index ee15d88b33d..7ca46e6e11a 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)  	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",  			  root->root_key.objectid); -	BUG_ON(IS_ERR(tsk)); +	BUG_ON(IS_ERR(tsk)); /* -ENOMEM */  }  int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) @@ -271,7 +271,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)  			break;  		info = rb_entry(n, struct 
btrfs_free_space, offset_index); -		BUG_ON(info->bitmap); +		BUG_ON(info->bitmap); /* Logic error */  		if (info->offset > root->cache_progress)  			goto free; @@ -443,13 +443,13 @@ int btrfs_save_ino_cache(struct btrfs_root *root,  				      trans->bytes_reserved, 1);  again:  	inode = lookup_free_ino_inode(root, path); -	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { +	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {  		ret = PTR_ERR(inode);  		goto out_release;  	}  	if (IS_ERR(inode)) { -		BUG_ON(retry); +		BUG_ON(retry); /* Logic error */  		retry = true;  		ret = create_free_ino_inode(root, trans, path); @@ -460,12 +460,17 @@ again:  	BTRFS_I(inode)->generation = 0;  	ret = btrfs_update_inode(trans, root, inode); -	WARN_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto out_put; +	}  	if (i_size_read(inode) > 0) {  		ret = btrfs_truncate_free_space_cache(root, trans, path, inode); -		if (ret) +		if (ret) { +			btrfs_abort_transaction(trans, root, ret);  			goto out_put; +		}  	}  	spin_lock(&root->cache_lock); @@ -532,7 +537,7 @@ static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)  	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);  	if (ret < 0)  		goto error; -	BUG_ON(ret == 0); +	BUG_ON(ret == 0); /* Corruption */  	if (path->slots[0] > 0) {  		slot = path->slots[0] - 1;  		l = path->nodes[0]; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 341a8670165..eb6aec7bbac 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -150,7 +150,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,  	inode_add_bytes(inode, size);  	ret = btrfs_insert_empty_item(trans, root, path, &key,  				      datasize); -	BUG_ON(ret);  	if (ret) {  		err = ret;  		goto fail; @@ -206,9 +205,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,  	 * could end up racing with unlink.  	 
*/  	BTRFS_I(inode)->disk_i_size = inode->i_size; -	btrfs_update_inode(trans, root, inode); +	ret = btrfs_update_inode(trans, root, inode); -	return 0; +	return ret;  fail:  	btrfs_free_path(path);  	return err; @@ -250,14 +249,18 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,  	ret = btrfs_drop_extents(trans, inode, start, aligned_end,  				 &hint_byte, 1); -	BUG_ON(ret); +	if (ret) +		return ret;  	if (isize > actual_end)  		inline_len = min_t(u64, isize, actual_end);  	ret = insert_inline_extent(trans, root, inode, start,  				   inline_len, compressed_size,  				   compress_type, compressed_pages); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		return ret; +	}  	btrfs_delalloc_release_metadata(inode, end + 1 - start);  	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);  	return 0; @@ -293,7 +296,7 @@ static noinline int add_async_extent(struct async_cow *cow,  	struct async_extent *async_extent;  	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); -	BUG_ON(!async_extent); +	BUG_ON(!async_extent); /* -ENOMEM */  	async_extent->start = start;  	async_extent->ram_size = ram_size;  	async_extent->compressed_size = compressed_size; @@ -433,7 +436,11 @@ again:  cont:  	if (start == 0) {  		trans = btrfs_join_transaction(root); -		BUG_ON(IS_ERR(trans)); +		if (IS_ERR(trans)) { +			ret = PTR_ERR(trans); +			trans = NULL; +			goto cleanup_and_out; +		}  		trans->block_rsv = &root->fs_info->delalloc_block_rsv;  		/* lets try to make an inline extent */ @@ -450,11 +457,11 @@ cont:  						    total_compressed,  						    compress_type, pages);  		} -		if (ret == 0) { +		if (ret <= 0) {  			/* -			 * inline extent creation worked, we don't need -			 * to create any more async work items.  Unlock -			 * and free up our temp pages. +			 * inline extent creation worked or returned error, +			 * we don't need to create any more async work items. +			 * Unlock and free up our temp pages.  			 
*/  			extent_clear_unlock_delalloc(inode,  			     &BTRFS_I(inode)->io_tree, @@ -547,7 +554,7 @@ cleanup_and_bail_uncompressed:  	}  out: -	return 0; +	return ret;  free_pages_out:  	for (i = 0; i < nr_pages_ret; i++) { @@ -557,6 +564,20 @@ free_pages_out:  	kfree(pages);  	goto out; + +cleanup_and_out: +	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, +				     start, end, NULL, +				     EXTENT_CLEAR_UNLOCK_PAGE | +				     EXTENT_CLEAR_DIRTY | +				     EXTENT_CLEAR_DELALLOC | +				     EXTENT_SET_WRITEBACK | +				     EXTENT_END_WRITEBACK); +	if (!trans || IS_ERR(trans)) +		btrfs_error(root->fs_info, ret, "Failed to join transaction"); +	else +		btrfs_abort_transaction(trans, root, ret); +	goto free_pages_out;  }  /* @@ -597,7 +618,7 @@ retry:  			lock_extent(io_tree, async_extent->start,  					 async_extent->start + -					 async_extent->ram_size - 1, GFP_NOFS); +					 async_extent->ram_size - 1);  			/* allocate blocks */  			ret = cow_file_range(inode, async_cow->locked_page, @@ -606,6 +627,8 @@ retry:  					     async_extent->ram_size - 1,  					     &page_started, &nr_written, 0); +			/* JDM XXX */ +  			/*  			 * if page_started, cow_file_range inserted an  			 * inline extent and took care of all the unlocking @@ -625,17 +648,21 @@ retry:  		}  		lock_extent(io_tree, async_extent->start, -			    async_extent->start + async_extent->ram_size - 1, -			    GFP_NOFS); +			    async_extent->start + async_extent->ram_size - 1);  		trans = btrfs_join_transaction(root); -		BUG_ON(IS_ERR(trans)); -		trans->block_rsv = &root->fs_info->delalloc_block_rsv; -		ret = btrfs_reserve_extent(trans, root, +		if (IS_ERR(trans)) { +			ret = PTR_ERR(trans); +		} else { +			trans->block_rsv = &root->fs_info->delalloc_block_rsv; +			ret = btrfs_reserve_extent(trans, root,  					   async_extent->compressed_size,  					   async_extent->compressed_size,  					   0, alloc_hint, &ins, 1); -		btrfs_end_transaction(trans, root); +			if (ret) +				
btrfs_abort_transaction(trans, root, ret); +			btrfs_end_transaction(trans, root); +		}  		if (ret) {  			int i; @@ -648,8 +675,10 @@ retry:  			async_extent->pages = NULL;  			unlock_extent(io_tree, async_extent->start,  				      async_extent->start + -				      async_extent->ram_size - 1, GFP_NOFS); -			goto retry; +				      async_extent->ram_size - 1); +			if (ret == -ENOSPC) +				goto retry; +			goto out_free; /* JDM: Requeue? */  		}  		/* @@ -661,7 +690,7 @@ retry:  					async_extent->ram_size - 1, 0);  		em = alloc_extent_map(); -		BUG_ON(!em); +		BUG_ON(!em); /* -ENOMEM */  		em->start = async_extent->start;  		em->len = async_extent->ram_size;  		em->orig_start = em->start; @@ -693,7 +722,7 @@ retry:  						ins.offset,  						BTRFS_ORDERED_COMPRESSED,  						async_extent->compress_type); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		/*  		 * clear dirty, set writeback and unlock the pages. @@ -715,13 +744,17 @@ retry:  				    ins.offset, async_extent->pages,  				    async_extent->nr_pages); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		alloc_hint = ins.objectid + ins.offset;  		kfree(async_extent);  		cond_resched();  	} - -	return 0; +	ret = 0; +out: +	return ret; +out_free: +	kfree(async_extent); +	goto out;  }  static u64 get_extent_allocation_hint(struct inode *inode, u64 start, @@ -790,7 +823,18 @@ static noinline int cow_file_range(struct inode *inode,  	BUG_ON(btrfs_is_free_space_inode(root, inode));  	trans = btrfs_join_transaction(root); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) { +		extent_clear_unlock_delalloc(inode, +			     &BTRFS_I(inode)->io_tree, +			     start, end, NULL, +			     EXTENT_CLEAR_UNLOCK_PAGE | +			     EXTENT_CLEAR_UNLOCK | +			     EXTENT_CLEAR_DELALLOC | +			     EXTENT_CLEAR_DIRTY | +			     EXTENT_SET_WRITEBACK | +			     EXTENT_END_WRITEBACK); +		return PTR_ERR(trans); +	}  	trans->block_rsv = &root->fs_info->delalloc_block_rsv;  	num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -820,8 +864,10 
@@ static noinline int cow_file_range(struct inode *inode,  			*nr_written = *nr_written +  			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;  			*page_started = 1; -			ret = 0;  			goto out; +		} else if (ret < 0) { +			btrfs_abort_transaction(trans, root, ret); +			goto out_unlock;  		}  	} @@ -838,10 +884,13 @@ static noinline int cow_file_range(struct inode *inode,  		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,  					   root->sectorsize, 0, alloc_hint,  					   &ins, 1); -		BUG_ON(ret); +		if (ret < 0) { +			btrfs_abort_transaction(trans, root, ret); +			goto out_unlock; +		}  		em = alloc_extent_map(); -		BUG_ON(!em); +		BUG_ON(!em); /* -ENOMEM */  		em->start = start;  		em->orig_start = em->start;  		ram_size = ins.offset; @@ -867,13 +916,16 @@ static noinline int cow_file_range(struct inode *inode,  		cur_alloc_size = ins.offset;  		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,  					       ram_size, cur_alloc_size, 0); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		if (root->root_key.objectid ==  		    BTRFS_DATA_RELOC_TREE_OBJECTID) {  			ret = btrfs_reloc_clone_csums(inode, start,  						      cur_alloc_size); -			BUG_ON(ret); +			if (ret) { +				btrfs_abort_transaction(trans, root, ret); +				goto out_unlock; +			}  		}  		if (disk_num_bytes < cur_alloc_size) @@ -898,11 +950,23 @@ static noinline int cow_file_range(struct inode *inode,  		alloc_hint = ins.objectid + ins.offset;  		start += cur_alloc_size;  	} -out:  	ret = 0; +out:  	btrfs_end_transaction(trans, root);  	return ret; +out_unlock: +	extent_clear_unlock_delalloc(inode, +		     &BTRFS_I(inode)->io_tree, +		     start, end, NULL, +		     EXTENT_CLEAR_UNLOCK_PAGE | +		     EXTENT_CLEAR_UNLOCK | +		     EXTENT_CLEAR_DELALLOC | +		     EXTENT_CLEAR_DIRTY | +		     EXTENT_SET_WRITEBACK | +		     EXTENT_END_WRITEBACK); + +	goto out;  }  /* @@ -968,7 +1032,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,  			 1, 0, NULL, 
GFP_NOFS);  	while (start < end) {  		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); -		BUG_ON(!async_cow); +		BUG_ON(!async_cow); /* -ENOMEM */  		async_cow->inode = inode;  		async_cow->root = root;  		async_cow->locked_page = locked_page; @@ -1059,7 +1123,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,  	u64 disk_bytenr;  	u64 num_bytes;  	int extent_type; -	int ret; +	int ret, err;  	int type;  	int nocow;  	int check_prev = 1; @@ -1077,7 +1141,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,  	else  		trans = btrfs_join_transaction(root); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) { +		btrfs_free_path(path); +		return PTR_ERR(trans); +	} +  	trans->block_rsv = &root->fs_info->delalloc_block_rsv;  	cow_start = (u64)-1; @@ -1085,7 +1153,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,  	while (1) {  		ret = btrfs_lookup_file_extent(trans, root, path, ino,  					       cur_offset, 0); -		BUG_ON(ret < 0); +		if (ret < 0) { +			btrfs_abort_transaction(trans, root, ret); +			goto error; +		}  		if (ret > 0 && path->slots[0] > 0 && check_prev) {  			leaf = path->nodes[0];  			btrfs_item_key_to_cpu(leaf, &found_key, @@ -1099,8 +1170,10 @@ next_slot:  		leaf = path->nodes[0];  		if (path->slots[0] >= btrfs_header_nritems(leaf)) {  			ret = btrfs_next_leaf(root, path); -			if (ret < 0) -				BUG_ON(1); +			if (ret < 0) { +				btrfs_abort_transaction(trans, root, ret); +				goto error; +			}  			if (ret > 0)  				break;  			leaf = path->nodes[0]; @@ -1188,7 +1261,10 @@ out_check:  			ret = cow_file_range(inode, locked_page, cow_start,  					found_key.offset - 1, page_started,  					nr_written, 1); -			BUG_ON(ret); +			if (ret) { +				btrfs_abort_transaction(trans, root, ret); +				goto error; +			}  			cow_start = (u64)-1;  		} @@ -1197,7 +1273,7 @@ out_check:  			struct extent_map_tree *em_tree;  			em_tree = &BTRFS_I(inode)->extent_tree;  			em = alloc_extent_map(); -			BUG_ON(!em); +			BUG_ON(!em); /* -ENOMEM */ 
 			em->start = cur_offset;  			em->orig_start = em->start;  			em->len = num_bytes; @@ -1223,13 +1299,16 @@ out_check:  		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,  					       num_bytes, num_bytes, type); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  		if (root->root_key.objectid ==  		    BTRFS_DATA_RELOC_TREE_OBJECTID) {  			ret = btrfs_reloc_clone_csums(inode, cur_offset,  						      num_bytes); -			BUG_ON(ret); +			if (ret) { +				btrfs_abort_transaction(trans, root, ret); +				goto error; +			}  		}  		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, @@ -1248,18 +1327,23 @@ out_check:  	if (cow_start != (u64)-1) {  		ret = cow_file_range(inode, locked_page, cow_start, end,  				     page_started, nr_written, 1); -		BUG_ON(ret); +		if (ret) { +			btrfs_abort_transaction(trans, root, ret); +			goto error; +		}  	} +error:  	if (nolock) { -		ret = btrfs_end_transaction_nolock(trans, root); -		BUG_ON(ret); +		err = btrfs_end_transaction_nolock(trans, root);  	} else { -		ret = btrfs_end_transaction(trans, root); -		BUG_ON(ret); +		err = btrfs_end_transaction(trans, root);  	} +	if (!ret) +		ret = err; +  	btrfs_free_path(path); -	return 0; +	return ret;  }  /* @@ -1424,10 +1508,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,  	map_length = length;  	ret = btrfs_map_block(map_tree, READ, logical,  			      &map_length, NULL, 0); - +	/* Will always return 0 or 1 with map_multi == NULL */ +	BUG_ON(ret < 0);  	if (map_length < length + size)  		return 1; -	return ret; +	return 0;  }  /* @@ -1447,7 +1532,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,  	int ret = 0;  	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	return 0;  } @@ -1478,14 +1563,16 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,  	struct btrfs_root *root = BTRFS_I(inode)->root;  	int ret = 0;  	int skip_sum; +	int metadata = 0;  	skip_sum 
= BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;  	if (btrfs_is_free_space_inode(root, inode)) -		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); -	else -		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); -	BUG_ON(ret); +		metadata = 2; + +	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); +	if (ret) +		return ret;  	if (!(rw & REQ_WRITE)) {  		if (bio_flags & EXTENT_BIO_COMPRESSED) { @@ -1570,7 +1657,7 @@ again:  	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;  	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, -			 &cached_state, GFP_NOFS); +			 &cached_state);  	/* already ordered? We're done */  	if (PagePrivate2(page)) @@ -1674,13 +1761,15 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,  	 */  	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,  				 &hint, 0); -	BUG_ON(ret); +	if (ret) +		goto out;  	ins.objectid = btrfs_ino(inode);  	ins.offset = file_pos;  	ins.type = BTRFS_EXTENT_DATA_KEY;  	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); -	BUG_ON(ret); +	if (ret) +		goto out;  	leaf = path->nodes[0];  	fi = btrfs_item_ptr(leaf, path->slots[0],  			    struct btrfs_file_extent_item); @@ -1708,10 +1797,10 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,  	ret = btrfs_alloc_reserved_file_extent(trans, root,  					root->root_key.objectid,  					btrfs_ino(inode), file_pos, &ins); -	BUG_ON(ret); +out:  	btrfs_free_path(path); -	return 0; +	return ret;  }  /* @@ -1739,35 +1828,41 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  					     end - start + 1);  	if (!ret)  		return 0; -	BUG_ON(!ordered_extent); +	BUG_ON(!ordered_extent); /* Logic error */  	nolock = btrfs_is_free_space_inode(root, inode);  	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { -		BUG_ON(!list_empty(&ordered_extent->list)); +		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */  		ret = 
btrfs_ordered_update_i_size(inode, 0, ordered_extent);  		if (!ret) {  			if (nolock)  				trans = btrfs_join_transaction_nolock(root);  			else  				trans = btrfs_join_transaction(root); -			BUG_ON(IS_ERR(trans)); +			if (IS_ERR(trans)) +				return PTR_ERR(trans);  			trans->block_rsv = &root->fs_info->delalloc_block_rsv;  			ret = btrfs_update_inode_fallback(trans, root, inode); -			BUG_ON(ret); +			if (ret) /* -ENOMEM or corruption */ +				btrfs_abort_transaction(trans, root, ret);  		}  		goto out;  	}  	lock_extent_bits(io_tree, ordered_extent->file_offset,  			 ordered_extent->file_offset + ordered_extent->len - 1, -			 0, &cached_state, GFP_NOFS); +			 0, &cached_state);  	if (nolock)  		trans = btrfs_join_transaction_nolock(root);  	else  		trans = btrfs_join_transaction(root); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) { +		ret = PTR_ERR(trans); +		trans = NULL; +		goto out_unlock; +	}  	trans->block_rsv = &root->fs_info->delalloc_block_rsv;  	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) @@ -1778,7 +1873,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  						ordered_extent->file_offset,  						ordered_extent->file_offset +  						ordered_extent->len); -		BUG_ON(ret);  	} else {  		BUG_ON(root == root->fs_info->tree_root);  		ret = insert_reserved_file_extent(trans, inode, @@ -1792,11 +1886,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,  				   ordered_extent->file_offset,  				   ordered_extent->len); -		BUG_ON(ret);  	}  	unlock_extent_cached(io_tree, ordered_extent->file_offset,  			     ordered_extent->file_offset +  			     ordered_extent->len - 1, &cached_state, GFP_NOFS); +	if (ret < 0) { +		btrfs_abort_transaction(trans, root, ret); +		goto out; +	}  	add_pending_csums(trans, inode, ordered_extent->file_offset,  			  &ordered_extent->list); @@ -1804,7 +1901,10 @@ static int btrfs_finish_ordered_io(struct inode 
*inode, u64 start, u64 end)  	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);  	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {  		ret = btrfs_update_inode_fallback(trans, root, inode); -		BUG_ON(ret); +		if (ret) { /* -ENOMEM or corruption */ +			btrfs_abort_transaction(trans, root, ret); +			goto out; +		}  	}  	ret = 0;  out: @@ -1823,6 +1923,11 @@ out:  	btrfs_put_ordered_extent(ordered_extent);  	return 0; +out_unlock: +	unlock_extent_cached(io_tree, ordered_extent->file_offset, +			     ordered_extent->file_offset + +			     ordered_extent->len - 1, &cached_state, GFP_NOFS); +	goto out;  }  static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, @@ -1904,6 +2009,8 @@ struct delayed_iput {  	struct inode *inode;  }; +/* JDM: If this is fs-wide, why can't we add a pointer to + * btrfs_inode instead and avoid the allocation? */  void btrfs_add_delayed_iput(struct inode *inode)  {  	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; @@ -2050,20 +2157,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)  	/* grab metadata reservation from transaction handle */  	if (reserve) {  		ret = btrfs_orphan_reserve_metadata(trans, inode); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? 
JDM */  	}  	/* insert an orphan item to track this unlinked/truncated file */  	if (insert >= 1) {  		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); -		BUG_ON(ret && ret != -EEXIST); +		if (ret && ret != -EEXIST) { +			btrfs_abort_transaction(trans, root, ret); +			return ret; +		} +		ret = 0;  	}  	/* insert an orphan item to track subvolume contains orphan files */  	if (insert >= 2) {  		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,  					       root->root_key.objectid); -		BUG_ON(ret); +		if (ret && ret != -EEXIST) { +			btrfs_abort_transaction(trans, root, ret); +			return ret; +		}  	}  	return 0;  } @@ -2093,7 +2207,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)  	if (trans && delete_item) {  		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */  	}  	if (release_rsv) @@ -2227,7 +2341,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)  			}  			ret = btrfs_del_orphan_item(trans, root,  						    found_key.objectid); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */  			btrfs_end_transaction(trans, root);  			continue;  		} @@ -2609,16 +2723,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,  		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "  		       "inode %llu parent %llu\n", name_len, name,  		       (unsigned long long)ino, (unsigned long long)dir_ino); +		btrfs_abort_transaction(trans, root, ret);  		goto err;  	}  	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); -	if (ret) +	if (ret) { +		btrfs_abort_transaction(trans, root, ret);  		goto err; +	}  	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,  					 inode, dir_ino); -	BUG_ON(ret != 0 && ret != -ENOENT); +	if (ret != 0 && ret != -ENOENT) { +		btrfs_abort_transaction(trans, root, ret); +		goto err; +	}  	ret = btrfs_del_dir_entries_in_log(trans, root, name, 
name_len,  					   dir, index); @@ -2776,7 +2896,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,  			err = ret;  			goto out;  		} -		BUG_ON(ret == 0); +		BUG_ON(ret == 0); /* Corruption */  		if (check_path_shared(root, path))  			goto out;  		btrfs_release_path(path); @@ -2809,7 +2929,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,  		err = PTR_ERR(ref);  		goto out;  	} -	BUG_ON(!ref); +	BUG_ON(!ref); /* Logic error */  	if (check_path_shared(root, path))  		goto out;  	index = btrfs_inode_ref_index(path->nodes[0], ref); @@ -2916,23 +3036,42 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,  	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,  				   name, name_len, -1); -	BUG_ON(IS_ERR_OR_NULL(di)); +	if (IS_ERR_OR_NULL(di)) { +		if (!di) +			ret = -ENOENT; +		else +			ret = PTR_ERR(di); +		goto out; +	}  	leaf = path->nodes[0];  	btrfs_dir_item_key_to_cpu(leaf, di, &key);  	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);  	ret = btrfs_delete_one_dir_name(trans, root, path, di); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto out; +	}  	btrfs_release_path(path);  	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,  				 objectid, root->root_key.objectid,  				 dir_ino, &index, name, name_len);  	if (ret < 0) { -		BUG_ON(ret != -ENOENT); +		if (ret != -ENOENT) { +			btrfs_abort_transaction(trans, root, ret); +			goto out; +		}  		di = btrfs_search_dir_index_item(root, path, dir_ino,  						 name, name_len); -		BUG_ON(IS_ERR_OR_NULL(di)); +		if (IS_ERR_OR_NULL(di)) { +			if (!di) +				ret = -ENOENT; +			else +				ret = PTR_ERR(di); +			btrfs_abort_transaction(trans, root, ret); +			goto out; +		}  		leaf = path->nodes[0];  		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); @@ -2942,15 +3081,19 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,  	btrfs_release_path(path);  	ret = 
btrfs_delete_delayed_dir_index(trans, root, dir, index); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto out; +	}  	btrfs_i_size_write(dir, dir->i_size - name_len * 2);  	dir->i_mtime = dir->i_ctime = CURRENT_TIME;  	ret = btrfs_update_inode(trans, root, dir); -	BUG_ON(ret); - +	if (ret) +		btrfs_abort_transaction(trans, root, ret); +out:  	btrfs_free_path(path); -	return 0; +	return ret;  }  static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) @@ -3160,8 +3303,8 @@ search_again:  				}  				size =  				    btrfs_file_extent_calc_inline_size(size); -				ret = btrfs_truncate_item(trans, root, path, -							  size, 1); +				btrfs_truncate_item(trans, root, path, +						    size, 1);  			} else if (root->ref_cows) {  				inode_sub_bytes(inode, item_end + 1 -  						found_key.offset); @@ -3209,7 +3352,11 @@ delete:  				ret = btrfs_del_items(trans, root, path,  						pending_del_slot,  						pending_del_nr); -				BUG_ON(ret); +				if (ret) { +					btrfs_abort_transaction(trans, +								root, ret); +					goto error; +				}  				pending_del_nr = 0;  			}  			btrfs_release_path(path); @@ -3222,8 +3369,10 @@ out:  	if (pending_del_nr) {  		ret = btrfs_del_items(trans, root, path, pending_del_slot,  				      pending_del_nr); -		BUG_ON(ret); +		if (ret) +			btrfs_abort_transaction(trans, root, ret);  	} +error:  	btrfs_free_path(path);  	return err;  } @@ -3281,8 +3430,7 @@ again:  	}  	wait_on_page_writeback(page); -	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, -			 GFP_NOFS); +	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);  	set_page_extent_mapped(page);  	ordered = btrfs_lookup_ordered_extent(inode, page_start); @@ -3358,7 +3506,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)  		btrfs_wait_ordered_range(inode, hole_start,  					 block_end - hole_start);  		lock_extent_bits(io_tree, hole_start, block_end - 1, 0, -				 &cached_state, GFP_NOFS); +				 
&cached_state);  		ordered = btrfs_lookup_ordered_extent(inode, hole_start);  		if (!ordered)  			break; @@ -3371,7 +3519,10 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)  	while (1) {  		em = btrfs_get_extent(inode, NULL, 0, cur_offset,  				block_end - cur_offset, 0); -		BUG_ON(IS_ERR_OR_NULL(em)); +		if (IS_ERR(em)) { +			err = PTR_ERR(em); +			break; +		}  		last_byte = min(extent_map_end(em), block_end);  		last_byte = (last_byte + mask) & ~mask;  		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { @@ -3388,7 +3539,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)  						 cur_offset + hole_size,  						 &hint_byte, 1);  			if (err) { -				btrfs_update_inode(trans, root, inode); +				btrfs_abort_transaction(trans, root, err);  				btrfs_end_transaction(trans, root);  				break;  			} @@ -3398,7 +3549,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)  					0, hole_size, 0, hole_size,  					0, 0, 0);  			if (err) { -				btrfs_update_inode(trans, root, inode); +				btrfs_abort_transaction(trans, root, err);  				btrfs_end_transaction(trans, root);  				break;  			} @@ -3778,7 +3929,7 @@ static void inode_tree_del(struct inode *inode)  	}  } -int btrfs_invalidate_inodes(struct btrfs_root *root) +void btrfs_invalidate_inodes(struct btrfs_root *root)  {  	struct rb_node *node;  	struct rb_node *prev; @@ -3838,7 +3989,6 @@ again:  		node = rb_next(node);  	}  	spin_unlock(&root->inode_lock); -	return 0;  }  static int btrfs_init_locked_inode(struct inode *inode, void *p) @@ -4580,18 +4730,26 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,  					     parent_ino, index);  	} -	if (ret == 0) { -		ret = btrfs_insert_dir_item(trans, root, name, name_len, -					    parent_inode, &key, -					    btrfs_inode_type(inode), index); -		if (ret) -			goto fail_dir_item; +	/* Nothing to clean up yet */ +	if (ret) +		return ret; -		btrfs_i_size_write(parent_inode, parent_inode->i_size + -				  
 name_len * 2); -		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; -		ret = btrfs_update_inode(trans, root, parent_inode); +	ret = btrfs_insert_dir_item(trans, root, name, name_len, +				    parent_inode, &key, +				    btrfs_inode_type(inode), index); +	if (ret == -EEXIST) +		goto fail_dir_item; +	else if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		return ret;  	} + +	btrfs_i_size_write(parent_inode, parent_inode->i_size + +			   name_len * 2); +	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; +	ret = btrfs_update_inode(trans, root, parent_inode); +	if (ret) +		btrfs_abort_transaction(trans, root, ret);  	return ret;  fail_dir_item: @@ -4805,7 +4963,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,  	} else {  		struct dentry *parent = dentry->d_parent;  		err = btrfs_update_inode(trans, root, inode); -		BUG_ON(err); +		if (err) +			goto fail;  		d_instantiate(dentry, inode);  		btrfs_log_new_name(trans, inode, NULL, parent);  	} @@ -5136,7 +5295,7 @@ again:  				ret = uncompress_inline(path, inode, page,  							pg_offset,  							extent_offset, item); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM */  			} else {  				map = kmap(page);  				read_extent_buffer(leaf, map + pg_offset, ptr, @@ -5251,6 +5410,7 @@ out:  		free_extent_map(em);  		return ERR_PTR(err);  	} +	BUG_ON(!em); /* Error is always set */  	return em;  } @@ -5601,7 +5761,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,  		free_extent_map(em);  		/* DIO will do one hole at a time, so just unlock a sector */  		unlock_extent(&BTRFS_I(inode)->io_tree, start, -			      start + root->sectorsize - 1, GFP_NOFS); +			      start + root->sectorsize - 1);  		return 0;  	} @@ -5742,7 +5902,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)  	} while (bvec <= bvec_end);  	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, -		      dip->logical_offset + dip->bytes - 1, GFP_NOFS); +		      
dip->logical_offset + dip->bytes - 1);  	bio->bi_private = dip->private;  	kfree(dip->csums); @@ -5793,7 +5953,7 @@ again:  	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,  			 ordered->file_offset + ordered->len - 1, 0, -			 &cached_state, GFP_NOFS); +			 &cached_state);  	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {  		ret = btrfs_mark_extent_written(trans, inode, @@ -5867,7 +6027,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,  	int ret;  	struct btrfs_root *root = BTRFS_I(inode)->root;  	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	return 0;  } @@ -6208,7 +6368,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,  	while (1) {  		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, -				 0, &cached_state, GFP_NOFS); +				 0, &cached_state);  		/*  		 * We're concerned with the entire range that we're going to be  		 * doing DIO to, so we need to make sure theres no ordered @@ -6232,7 +6392,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,  	if (writing) {  		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;  		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, -				     EXTENT_DELALLOC, 0, NULL, &cached_state, +				     EXTENT_DELALLOC, NULL, &cached_state,  				     GFP_NOFS);  		if (ret) {  			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, @@ -6362,8 +6522,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  		btrfs_releasepage(page, GFP_NOFS);  		return;  	} -	lock_extent_bits(tree, page_start, page_end, 0, &cached_state, -			 GFP_NOFS); +	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);  	ordered = btrfs_lookup_ordered_extent(page->mapping->host,  					   page_offset(page));  	if (ordered) { @@ -6385,8 +6544,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)  		}  		btrfs_put_ordered_extent(ordered);  		
cached_state = NULL; -		lock_extent_bits(tree, page_start, page_end, 0, &cached_state, -				 GFP_NOFS); +		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);  	}  	clear_extent_bit(tree, page_start, page_end,  		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | @@ -6461,8 +6619,7 @@ again:  	}  	wait_on_page_writeback(page); -	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, -			 GFP_NOFS); +	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);  	set_page_extent_mapped(page);  	/* @@ -6736,10 +6893,9 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,  	btrfs_i_size_write(inode, 0);  	err = btrfs_update_inode(trans, new_root, inode); -	BUG_ON(err);  	iput(inode); -	return 0; +	return err;  }  struct inode *btrfs_alloc_inode(struct super_block *sb) @@ -7073,7 +7229,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,  		if (!ret)  			ret = btrfs_update_inode(trans, root, old_inode);  	} -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto out_fail; +	}  	if (new_inode) {  		new_inode->i_ctime = CURRENT_TIME; @@ -7091,11 +7250,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,  						 new_dentry->d_name.name,  						 new_dentry->d_name.len);  		} -		BUG_ON(ret); -		if (new_inode->i_nlink == 0) { +		if (!ret && new_inode->i_nlink == 0) {  			ret = btrfs_orphan_add(trans, new_dentry->d_inode);  			BUG_ON(ret);  		} +		if (ret) { +			btrfs_abort_transaction(trans, root, ret); +			goto out_fail; +		}  	}  	fixup_inode_flags(new_dir, old_inode); @@ -7103,7 +7265,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,  	ret = btrfs_add_link(trans, new_dir, old_inode,  			     new_dentry->d_name.name,  			     new_dentry->d_name.len, 0, index); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto out_fail; +	}  	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {  		struct dentry *parent = 
new_dentry->d_parent; @@ -7328,7 +7493,12 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,  						  ins.offset, ins.offset,  						  ins.offset, 0, 0, 0,  						  BTRFS_FILE_EXTENT_PREALLOC); -		BUG_ON(ret); +		if (ret) { +			btrfs_abort_transaction(trans, root, ret); +			if (own_trans) +				btrfs_end_transaction(trans, root); +			break; +		}  		btrfs_drop_extent_cache(inode, cur_offset,  					cur_offset + ins.offset -1, 0); @@ -7350,7 +7520,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,  		}  		ret = btrfs_update_inode(trans, root, inode); -		BUG_ON(ret); + +		if (ret) { +			btrfs_abort_transaction(trans, root, ret); +			if (own_trans) +				btrfs_end_transaction(trans, root); +			break; +		}  		if (own_trans)  			btrfs_end_transaction(trans, root); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d8b54715c2d..20580920071 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -425,22 +425,37 @@ static noinline int create_subvol(struct btrfs_root *root,  	key.offset = (u64)-1;  	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); -	BUG_ON(IS_ERR(new_root)); +	if (IS_ERR(new_root)) { +		btrfs_abort_transaction(trans, root, PTR_ERR(new_root)); +		ret = PTR_ERR(new_root); +		goto fail; +	}  	btrfs_record_root_in_trans(trans, new_root);  	ret = btrfs_create_subvol_root(trans, new_root, new_dirid); +	if (ret) { +		/* We potentially lose an unused inode item here */ +		btrfs_abort_transaction(trans, root, ret); +		goto fail; +	} +  	/*  	 * insert the directory item  	 */  	ret = btrfs_set_inode_index(dir, &index); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		goto fail; +	}  	ret = btrfs_insert_dir_item(trans, root,  				    name, namelen, dir, &key,  				    BTRFS_FT_DIR, index); -	if (ret) +	if (ret) { +		btrfs_abort_transaction(trans, root, ret);  		goto fail; +	}  	btrfs_i_size_write(dir, dir->i_size + namelen * 2);  	ret = btrfs_update_inode(trans, root, dir); @@ 
-797,9 +812,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,  	if (!em) {  		/* get the big lock and read metadata off disk */ -		lock_extent(io_tree, start, start + len - 1, GFP_NOFS); +		lock_extent(io_tree, start, start + len - 1);  		em = btrfs_get_extent(inode, NULL, 0, start, len, 0); -		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); +		unlock_extent(io_tree, start, start + len - 1);  		if (IS_ERR(em))  			return 0; @@ -887,10 +902,10 @@ again:  		page_start = page_offset(page);  		page_end = page_start + PAGE_CACHE_SIZE - 1;  		while (1) { -			lock_extent(tree, page_start, page_end, GFP_NOFS); +			lock_extent(tree, page_start, page_end);  			ordered = btrfs_lookup_ordered_extent(inode,  							      page_start); -			unlock_extent(tree, page_start, page_end, GFP_NOFS); +			unlock_extent(tree, page_start, page_end);  			if (!ordered)  				break; @@ -946,8 +961,7 @@ again:  	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;  	lock_extent_bits(&BTRFS_I(inode)->io_tree, -			 page_start, page_end - 1, 0, &cached_state, -			 GFP_NOFS); +			 page_start, page_end - 1, 0, &cached_state);  	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,  			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |  			  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, @@ -1966,7 +1980,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,  				dest->root_key.objectid,  				dentry->d_name.name,  				dentry->d_name.len); -	BUG_ON(ret); +	if (ret) { +		err = ret; +		btrfs_abort_transaction(trans, root, ret); +		goto out_end_trans; +	}  	btrfs_record_root_in_trans(trans, dest); @@ -1979,11 +1997,16 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,  		ret = btrfs_insert_orphan_item(trans,  					root->fs_info->tree_root,  					dest->root_key.objectid); -		BUG_ON(ret); +		if (ret) { +			btrfs_abort_transaction(trans, root, ret); +			err = ret; +			goto out_end_trans; +		}  	} - +out_end_trans:  	ret = 
btrfs_end_transaction(trans, root); -	BUG_ON(ret); +	if (ret && !err) +		err = ret;  	inode->i_flags |= S_DEAD;  out_up_write:  	up_write(&root->fs_info->subvol_sem); @@ -2326,13 +2349,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  	   another, and lock file content */  	while (1) {  		struct btrfs_ordered_extent *ordered; -		lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); +		lock_extent(&BTRFS_I(src)->io_tree, off, off+len);  		ordered = btrfs_lookup_first_ordered_extent(src, off+len);  		if (!ordered &&  		    !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,  				   EXTENT_DELALLOC, 0, NULL))  			break; -		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); +		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);  		if (ordered)  			btrfs_put_ordered_extent(ordered);  		btrfs_wait_ordered_range(src, off, len); @@ -2447,11 +2470,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  							 new_key.offset,  							 new_key.offset + datal,  							 &hint_byte, 1); -				BUG_ON(ret); +				if (ret) { +					btrfs_abort_transaction(trans, root, +								ret); +					btrfs_end_transaction(trans, root); +					goto out; +				}  				ret = btrfs_insert_empty_item(trans, root, path,  							      &new_key, size); -				BUG_ON(ret); +				if (ret) { +					btrfs_abort_transaction(trans, root, +								ret); +					btrfs_end_transaction(trans, root); +					goto out; +				}  				leaf = path->nodes[0];  				slot = path->slots[0]; @@ -2478,7 +2511,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  							btrfs_ino(inode),  							new_key.offset - datao,  							0); -					BUG_ON(ret); +					if (ret) { +						btrfs_abort_transaction(trans, +									root, +									ret); +						btrfs_end_transaction(trans, +								      root); +						goto out; + +					}  				}  			} else if (type == BTRFS_FILE_EXTENT_INLINE) {  				u64 skip = 0; @@ -2503,11 +2544,21 @@ 
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  							 new_key.offset,  							 new_key.offset + datal,  							 &hint_byte, 1); -				BUG_ON(ret); +				if (ret) { +					btrfs_abort_transaction(trans, root, +								ret); +					btrfs_end_transaction(trans, root); +					goto out; +				}  				ret = btrfs_insert_empty_item(trans, root, path,  							      &new_key, size); -				BUG_ON(ret); +				if (ret) { +					btrfs_abort_transaction(trans, root, +								ret); +					btrfs_end_transaction(trans, root); +					goto out; +				}  				if (skip) {  					u32 start = @@ -2541,8 +2592,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  				btrfs_i_size_write(inode, endoff);  			ret = btrfs_update_inode(trans, root, inode); -			BUG_ON(ret); -			btrfs_end_transaction(trans, root); +			if (ret) { +				btrfs_abort_transaction(trans, root, ret); +				btrfs_end_transaction(trans, root); +				goto out; +			} +			ret = btrfs_end_transaction(trans, root);  		}  next:  		btrfs_release_path(path); @@ -2551,7 +2606,7 @@ next:  	ret = 0;  out:  	btrfs_release_path(path); -	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); +	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);  out_unlock:  	mutex_unlock(&src->i_mutex);  	mutex_unlock(&inode->i_mutex); diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 5e178d8f716..272f911203f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -208,7 +208,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)   * take a spinning write lock.  
This will wait for both   * blocking readers or writers   */ -int btrfs_tree_lock(struct extent_buffer *eb) +void btrfs_tree_lock(struct extent_buffer *eb)  {  again:  	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); @@ -230,13 +230,12 @@ again:  	atomic_inc(&eb->spinning_writers);  	atomic_inc(&eb->write_locks);  	eb->lock_owner = current->pid; -	return 0;  }  /*   * drop a spinning or a blocking write lock.   */ -int btrfs_tree_unlock(struct extent_buffer *eb) +void btrfs_tree_unlock(struct extent_buffer *eb)  {  	int blockers = atomic_read(&eb->blocking_writers); @@ -255,7 +254,6 @@ int btrfs_tree_unlock(struct extent_buffer *eb)  		atomic_dec(&eb->spinning_writers);  		write_unlock(&eb->lock);  	} -	return 0;  }  void btrfs_assert_tree_locked(struct extent_buffer *eb) diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 17247ddb81a..ca52681e5f4 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -24,8 +24,8 @@  #define BTRFS_WRITE_LOCK_BLOCKING 3  #define BTRFS_READ_LOCK_BLOCKING 4 -int btrfs_tree_lock(struct extent_buffer *eb); -int btrfs_tree_unlock(struct extent_buffer *eb); +void btrfs_tree_lock(struct extent_buffer *eb); +void btrfs_tree_unlock(struct extent_buffer *eb);  int btrfs_try_spin_lock(struct extent_buffer *eb);  void btrfs_tree_read_lock(struct extent_buffer *eb); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index a1c94042530..bbf6d0d9aeb 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -59,6 +59,14 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,  	return NULL;  } +static void ordered_data_tree_panic(struct inode *inode, int errno, +					       u64 offset) +{ +	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); +	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset " +		    "%llu\n", (unsigned long long)offset); +} +  /*   * look for a given offset in the tree, and if it can't be found return the   * first lesser offset @@ 
-207,7 +215,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,  	spin_lock(&tree->lock);  	node = tree_insert(&tree->tree, file_offset,  			   &entry->rb_node); -	BUG_ON(node); +	if (node) +		ordered_data_tree_panic(inode, -EEXIST, file_offset);  	spin_unlock(&tree->lock);  	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); @@ -215,7 +224,6 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,  		      &BTRFS_I(inode)->root->fs_info->ordered_extents);  	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); -	BUG_ON(node);  	return 0;  } @@ -249,9 +257,9 @@ int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,   * when an ordered extent is finished.  If the list covers more than one   * ordered extent, it is split across multiples.   */ -int btrfs_add_ordered_sum(struct inode *inode, -			  struct btrfs_ordered_extent *entry, -			  struct btrfs_ordered_sum *sum) +void btrfs_add_ordered_sum(struct inode *inode, +			   struct btrfs_ordered_extent *entry, +			   struct btrfs_ordered_sum *sum)  {  	struct btrfs_ordered_inode_tree *tree; @@ -259,7 +267,6 @@ int btrfs_add_ordered_sum(struct inode *inode,  	spin_lock(&tree->lock);  	list_add_tail(&sum->list, &entry->list);  	spin_unlock(&tree->lock); -	return 0;  }  /* @@ -384,7 +391,7 @@ out:   * used to drop a reference on an ordered extent.  This will free   * the extent if the last reference is dropped   */ -int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) +void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)  {  	struct list_head *cur;  	struct btrfs_ordered_sum *sum; @@ -400,7 +407,6 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)  		}  		kfree(entry);  	} -	return 0;  }  /* @@ -408,8 +414,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)   * and you must wake_up entry->wait.  You must hold the tree lock   * while you call this function.   
*/ -static int __btrfs_remove_ordered_extent(struct inode *inode, -				struct btrfs_ordered_extent *entry) +static void __btrfs_remove_ordered_extent(struct inode *inode, +					  struct btrfs_ordered_extent *entry)  {  	struct btrfs_ordered_inode_tree *tree;  	struct btrfs_root *root = BTRFS_I(inode)->root; @@ -436,35 +442,30 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,  		list_del_init(&BTRFS_I(inode)->ordered_operations);  	}  	spin_unlock(&root->fs_info->ordered_extent_lock); - -	return 0;  }  /*   * remove an ordered extent from the tree.  No references are dropped   * but any waiters are woken.   */ -int btrfs_remove_ordered_extent(struct inode *inode, -				struct btrfs_ordered_extent *entry) +void btrfs_remove_ordered_extent(struct inode *inode, +				 struct btrfs_ordered_extent *entry)  {  	struct btrfs_ordered_inode_tree *tree; -	int ret;  	tree = &BTRFS_I(inode)->ordered_tree;  	spin_lock(&tree->lock); -	ret = __btrfs_remove_ordered_extent(inode, entry); +	__btrfs_remove_ordered_extent(inode, entry);  	spin_unlock(&tree->lock);  	wake_up(&entry->wait); - -	return ret;  }  /*   * wait for all the ordered extents in a root.  This is done when balancing   * space between drives.   
*/ -int btrfs_wait_ordered_extents(struct btrfs_root *root, -			       int nocow_only, int delay_iput) +void btrfs_wait_ordered_extents(struct btrfs_root *root, +				int nocow_only, int delay_iput)  {  	struct list_head splice;  	struct list_head *cur; @@ -512,7 +513,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,  		spin_lock(&root->fs_info->ordered_extent_lock);  	}  	spin_unlock(&root->fs_info->ordered_extent_lock); -	return 0;  }  /* @@ -525,7 +525,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,   * extra check to make sure the ordered operation list really is empty   * before we return   */ -int btrfs_run_ordered_operations(struct btrfs_root *root, int wait) +void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)  {  	struct btrfs_inode *btrfs_inode;  	struct inode *inode; @@ -573,8 +573,6 @@ again:  	spin_unlock(&root->fs_info->ordered_extent_lock);  	mutex_unlock(&root->fs_info->ordered_operations_mutex); - -	return 0;  }  /* @@ -609,7 +607,7 @@ void btrfs_start_ordered_extent(struct inode *inode,  /*   * Used to wait on ordered extents across a large range of bytes.   */ -int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)  {  	u64 end;  	u64 orig_end; @@ -664,7 +662,6 @@ again:  		schedule_timeout(1);  		goto again;  	} -	return 0;  }  /* @@ -948,9 +945,8 @@ out:   * If trans is not null, we'll do a friendly check for a transaction that   * is already flushing things and force the IO down ourselves.   
*/ -int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, -				struct btrfs_root *root, -				struct inode *inode) +void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, +				 struct btrfs_root *root, struct inode *inode)  {  	u64 last_mod; @@ -961,7 +957,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,  	 * commit, we can safely return without doing anything  	 */  	if (last_mod < root->fs_info->last_trans_committed) -		return 0; +		return;  	/*  	 * the transaction is already committing.  Just start the IO and @@ -969,7 +965,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,  	 */  	if (trans && root->fs_info->running_transaction->blocked) {  		btrfs_wait_ordered_range(inode, 0, (u64)-1); -		return 0; +		return;  	}  	spin_lock(&root->fs_info->ordered_extent_lock); @@ -978,6 +974,4 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,  			      &root->fs_info->ordered_operations);  	}  	spin_unlock(&root->fs_info->ordered_extent_lock); - -	return 0;  } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index ff1f69aa188..c355ad4dc1a 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -138,8 +138,8 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)  	t->last = NULL;  } -int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); -int btrfs_remove_ordered_extent(struct inode *inode, +void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); +void btrfs_remove_ordered_extent(struct inode *inode,  				struct btrfs_ordered_extent *entry);  int btrfs_dec_test_ordered_pending(struct inode *inode,  				   struct btrfs_ordered_extent **cached, @@ -154,14 +154,14 @@ int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,  int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,  				      u64 start, u64 len, u64 disk_len,  				      int type, int compress_type); -int btrfs_add_ordered_sum(struct 
inode *inode, -			  struct btrfs_ordered_extent *entry, -			  struct btrfs_ordered_sum *sum); +void btrfs_add_ordered_sum(struct inode *inode, +			   struct btrfs_ordered_extent *entry, +			   struct btrfs_ordered_sum *sum);  struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,  							 u64 file_offset);  void btrfs_start_ordered_extent(struct inode *inode,  				struct btrfs_ordered_extent *entry, int wait); -int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);  struct btrfs_ordered_extent *  btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);  struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, @@ -170,10 +170,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,  int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,  				struct btrfs_ordered_extent *ordered);  int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); -int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); -int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, -				struct btrfs_root *root, -				struct inode *inode); -int btrfs_wait_ordered_extents(struct btrfs_root *root, -			       int nocow_only, int delay_iput); +void btrfs_run_ordered_operations(struct btrfs_root *root, int wait); +void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, +				 struct btrfs_root *root, +				 struct inode *inode); +void btrfs_wait_ordered_extents(struct btrfs_root *root, +				int nocow_only, int delay_iput);  #endif diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c index f8be250963a..24cad1695af 100644 --- a/fs/btrfs/orphan.c +++ b/fs/btrfs/orphan.c @@ -58,7 +58,7 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,  	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);  	if (ret < 0)  		goto out; -	if (ret) { +	if (ret) { /* JDM: 
Really? */  		ret = -ENOENT;  		goto out;  	} diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 8c1aae2c845..017281dbb2a 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -326,6 +326,19 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)  	return NULL;  } +void backref_tree_panic(struct rb_node *rb_node, int errno, +					  u64 bytenr) +{ + +	struct btrfs_fs_info *fs_info = NULL; +	struct backref_node *bnode = rb_entry(rb_node, struct backref_node, +					      rb_node); +	if (bnode->root) +		fs_info = bnode->root->fs_info; +	btrfs_panic(fs_info, errno, "Inconsistency in backref cache " +		    "found at offset %llu\n", (unsigned long long)bytenr); +} +  /*   * walk up backref nodes until reach node presents tree root   */ @@ -452,7 +465,8 @@ static void update_backref_node(struct backref_cache *cache,  	rb_erase(&node->rb_node, &cache->rb_root);  	node->bytenr = bytenr;  	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); -	BUG_ON(rb_node); +	if (rb_node) +		backref_tree_panic(rb_node, -EEXIST, bytenr);  }  /* @@ -999,7 +1013,8 @@ next:  	if (!cowonly) {  		rb_node = tree_insert(&cache->rb_root, node->bytenr,  				      &node->rb_node); -		BUG_ON(rb_node); +		if (rb_node) +			backref_tree_panic(rb_node, -EEXIST, node->bytenr);  		list_add_tail(&node->lower, &cache->leaves);  	} @@ -1034,7 +1049,9 @@ next:  		if (!cowonly) {  			rb_node = tree_insert(&cache->rb_root, upper->bytenr,  					      &upper->rb_node); -			BUG_ON(rb_node); +			if (rb_node) +				backref_tree_panic(rb_node, -EEXIST, +						   upper->bytenr);  		}  		list_add_tail(&edge->list[UPPER], &upper->lower); @@ -1180,7 +1197,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,  	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,  			      &new_node->rb_node); -	BUG_ON(rb_node); +	if (rb_node) +		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);  	if (!new_node->lowest) {  		list_for_each_entry(new_edge, 
&new_node->lower, list[UPPER]) { @@ -1203,14 +1221,15 @@ fail:  /*   * helper to add 'address of tree root -> reloc tree' mapping   */ -static int __add_reloc_root(struct btrfs_root *root) +static int __must_check __add_reloc_root(struct btrfs_root *root)  {  	struct rb_node *rb_node;  	struct mapping_node *node;  	struct reloc_control *rc = root->fs_info->reloc_ctl;  	node = kmalloc(sizeof(*node), GFP_NOFS); -	BUG_ON(!node); +	if (!node) +		return -ENOMEM;  	node->bytenr = root->node->start;  	node->data = root; @@ -1219,7 +1238,12 @@ static int __add_reloc_root(struct btrfs_root *root)  	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,  			      node->bytenr, &node->rb_node);  	spin_unlock(&rc->reloc_root_tree.lock); -	BUG_ON(rb_node); +	if (rb_node) { +		kfree(node); +		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found " +			    "for start=%llu while inserting into relocation " +			    "tree\n"); +	}  	list_add_tail(&root->root_list, &rc->reloc_roots);  	return 0; @@ -1252,7 +1276,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del)  		rb_node = tree_insert(&rc->reloc_root_tree.rb_root,  				      node->bytenr, &node->rb_node);  		spin_unlock(&rc->reloc_root_tree.lock); -		BUG_ON(rb_node); +		if (rb_node) +			backref_tree_panic(rb_node, -EEXIST, node->bytenr);  	} else {  		list_del_init(&root->root_list);  		kfree(node); @@ -1334,6 +1359,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,  	struct btrfs_root *reloc_root;  	struct reloc_control *rc = root->fs_info->reloc_ctl;  	int clear_rsv = 0; +	int ret;  	if (root->reloc_root) {  		reloc_root = root->reloc_root; @@ -1353,7 +1379,8 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,  	if (clear_rsv)  		trans->block_rsv = NULL; -	__add_reloc_root(reloc_root); +	ret = __add_reloc_root(reloc_root); +	BUG_ON(ret < 0);  	root->reloc_root = reloc_root;  	return 0;  } @@ -1577,15 +1604,14 @@ int replace_file_extents(struct btrfs_trans_handle *trans,  				
WARN_ON(!IS_ALIGNED(end, root->sectorsize));  				end--;  				ret = try_lock_extent(&BTRFS_I(inode)->io_tree, -						      key.offset, end, -						      GFP_NOFS); +						      key.offset, end);  				if (!ret)  					continue;  				btrfs_drop_extent_cache(inode, key.offset, end,  							1);  				unlock_extent(&BTRFS_I(inode)->io_tree, -					      key.offset, end, GFP_NOFS); +					      key.offset, end);  			}  		} @@ -1956,9 +1982,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,  		}  		/* the lock_extent waits for readpage to complete */ -		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +		lock_extent(&BTRFS_I(inode)->io_tree, start, end);  		btrfs_drop_extent_cache(inode, start, end, 1); -		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);  	}  	return 0;  } @@ -2246,7 +2272,8 @@ again:  		} else {  			list_del_init(&reloc_root->root_list);  		} -		btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); +		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); +		BUG_ON(ret < 0);  	}  	if (found) { @@ -2862,12 +2889,12 @@ int prealloc_file_extent_cluster(struct inode *inode,  		else  			end = cluster->end - offset; -		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +		lock_extent(&BTRFS_I(inode)->io_tree, start, end);  		num_bytes = end + 1 - start;  		ret = btrfs_prealloc_file_range(inode, 0, start,  						num_bytes, num_bytes,  						end + 1, &alloc_hint); -		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);  		if (ret)  			break;  		nr++; @@ -2899,7 +2926,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,  	em->bdev = root->fs_info->fs_devices->latest_bdev;  	set_bit(EXTENT_FLAG_PINNED, &em->flags); -	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +	lock_extent(&BTRFS_I(inode)->io_tree, start, end);  	while (1) {  		write_lock(&em_tree->lock);  		
ret = add_extent_mapping(em_tree, em); @@ -2910,7 +2937,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,  		}  		btrfs_drop_extent_cache(inode, start, end, 0);  	} -	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); +	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);  	return ret;  } @@ -2990,8 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,  		page_start = (u64)page->index << PAGE_CACHE_SHIFT;  		page_end = page_start + PAGE_CACHE_SIZE - 1; -		lock_extent(&BTRFS_I(inode)->io_tree, -			    page_start, page_end, GFP_NOFS); +		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);  		set_page_extent_mapped(page); @@ -3007,7 +3033,7 @@ static int relocate_file_extent_cluster(struct inode *inode,  		set_page_dirty(page);  		unlock_extent(&BTRFS_I(inode)->io_tree, -			      page_start, page_end, GFP_NOFS); +			      page_start, page_end);  		unlock_page(page);  		page_cache_release(page); @@ -3154,7 +3180,8 @@ static int add_tree_block(struct reloc_control *rc,  	block->key_ready = 0;  	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); -	BUG_ON(rb_node); +	if (rb_node) +		backref_tree_panic(rb_node, -EEXIST, block->bytenr);  	return 0;  } @@ -3426,7 +3453,9 @@ static int find_data_references(struct reloc_control *rc,  			block->key_ready = 1;  			rb_node = tree_insert(blocks, block->bytenr,  					      &block->rb_node); -			BUG_ON(rb_node); +			if (rb_node) +				backref_tree_panic(rb_node, -EEXIST, +						   block->bytenr);  		}  		if (counted)  			added = 1; @@ -4073,10 +4102,11 @@ out:  static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)  {  	struct btrfs_trans_handle *trans; -	int ret; +	int ret, err;  	trans = btrfs_start_transaction(root->fs_info->tree_root, 0); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) +		return PTR_ERR(trans);  	memset(&root->root_item.drop_progress, 0,  		sizeof(root->root_item.drop_progress)); @@ -4084,11 +4114,11 @@ static 
noinline_for_stack int mark_garbage_root(struct btrfs_root *root)  	btrfs_set_root_refs(&root->root_item, 0);  	ret = btrfs_update_root(trans, root->fs_info->tree_root,  				&root->root_key, &root->root_item); -	BUG_ON(ret); -	ret = btrfs_end_transaction(trans, root->fs_info->tree_root); -	BUG_ON(ret); -	return 0; +	err = btrfs_end_transaction(trans, root->fs_info->tree_root); +	if (err) +		return err; +	return ret;  }  /* @@ -4156,7 +4186,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)  					err = ret;  					goto out;  				} -				mark_garbage_root(reloc_root); +				ret = mark_garbage_root(reloc_root); +				if (ret < 0) { +					err = ret; +					goto out; +				}  			}  		} @@ -4202,13 +4236,19 @@ int btrfs_recover_relocation(struct btrfs_root *root)  		fs_root = read_fs_root(root->fs_info,  				       reloc_root->root_key.offset); -		BUG_ON(IS_ERR(fs_root)); +		if (IS_ERR(fs_root)) { +			err = PTR_ERR(fs_root); +			goto out_free; +		} -		__add_reloc_root(reloc_root); +		err = __add_reloc_root(reloc_root); +		BUG_ON(err < 0); /* -ENOMEM or logic error */  		fs_root->reloc_root = reloc_root;  	} -	btrfs_commit_transaction(trans, rc->extent_root); +	err = btrfs_commit_transaction(trans, rc->extent_root); +	if (err) +		goto out_free;  	merge_reloc_roots(rc); @@ -4218,7 +4258,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)  	if (IS_ERR(trans))  		err = PTR_ERR(trans);  	else -		btrfs_commit_transaction(trans, rc->extent_root); +		err = btrfs_commit_transaction(trans, rc->extent_root);  out_free:  	kfree(rc);  out: @@ -4267,6 +4307,8 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)  	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;  	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,  				       disk_bytenr + len - 1, &list, 0); +	if (ret) +		goto out;  	while (!list_empty(&list)) {  		sums = list_entry(list.next, struct btrfs_ordered_sum, list); @@ -4284,6 +4326,7 @@ int btrfs_reloc_clone_csums(struct 
inode *inode, u64 file_pos, u64 len)  		btrfs_add_ordered_sum(inode, ordered, sums);  	} +out:  	btrfs_put_ordered_extent(ordered);  	return ret;  } @@ -4380,7 +4423,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,   * called after snapshot is created. migrate block reservation   * and create reloc root for the newly created snapshot   */ -void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, +int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,  			       struct btrfs_pending_snapshot *pending)  {  	struct btrfs_root *root = pending->root; @@ -4390,7 +4433,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,  	int ret;  	if (!root->reloc_root) -		return; +		return 0;  	rc = root->fs_info->reloc_ctl;  	rc->merging_rsv_size += rc->nodes_relocated; @@ -4399,18 +4442,21 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,  		ret = btrfs_block_rsv_migrate(&pending->block_rsv,  					      rc->block_rsv,  					      rc->nodes_relocated); -		BUG_ON(ret); +		if (ret) +			return ret;  	}  	new_root = pending->snap;  	reloc_root = create_reloc_root(trans, root->reloc_root,  				       new_root->root_key.objectid); +	if (IS_ERR(reloc_root)) +		return PTR_ERR(reloc_root); -	__add_reloc_root(reloc_root); +	ret = __add_reloc_root(reloc_root); +	BUG_ON(ret < 0);  	new_root->reloc_root = reloc_root; -	if (rc->create_reloc_tree) { +	if (rc->create_reloc_tree)  		ret = clone_backref_node(trans, rc, root, reloc_root); -		BUG_ON(ret); -	} +	return ret;  } diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index f4099904565..24fb8ce4e07 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -93,10 +93,14 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root  	unsigned long ptr;  	path = btrfs_alloc_path(); -	BUG_ON(!path); +	if (!path) +		return -ENOMEM; +  	ret = btrfs_search_slot(trans, root, key, path, 0, 1); -	if (ret < 0) +	if (ret < 0) { +		
btrfs_abort_transaction(trans, root, ret);  		goto out; +	}  	if (ret != 0) {  		btrfs_print_leaf(root, path->nodes[0]); @@ -116,13 +120,10 @@ out:  	return ret;  } -int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root -		      *root, struct btrfs_key *key, struct btrfs_root_item -		      *item) +int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, +		      struct btrfs_key *key, struct btrfs_root_item *item)  { -	int ret; -	ret = btrfs_insert_item(trans, root, key, item, sizeof(*item)); -	return ret; +	return btrfs_insert_item(trans, root, key, item, sizeof(*item));  }  /* @@ -384,6 +385,8 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,   *   * For a back ref the root_id is the id of the subvol or snapshot and   * ref_id is the id of the tree referencing it. + * + * Will return 0, -ENOMEM, or anything from the CoW path   */  int btrfs_add_root_ref(struct btrfs_trans_handle *trans,  		       struct btrfs_root *tree_root, @@ -407,7 +410,11 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,  again:  	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,  				      sizeof(*ref) + name_len); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, tree_root, ret); +		btrfs_free_path(path); +		return ret; +	}  	leaf = path->nodes[0];  	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 5221e072bb6..07e59d97551 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -2157,6 +2157,9 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)  	struct btrfs_device *device = sdev->dev;  	struct btrfs_root *root = device->dev_root; +	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) +		return -EIO; +  	gen = root->fs_info->last_trans_committed;  	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { @@ -2317,7 +2320,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,  	return ret;  } -int 
btrfs_scrub_pause(struct btrfs_root *root) +void btrfs_scrub_pause(struct btrfs_root *root)  {  	struct btrfs_fs_info *fs_info = root->fs_info; @@ -2332,34 +2335,28 @@ int btrfs_scrub_pause(struct btrfs_root *root)  		mutex_lock(&fs_info->scrub_lock);  	}  	mutex_unlock(&fs_info->scrub_lock); - -	return 0;  } -int btrfs_scrub_continue(struct btrfs_root *root) +void btrfs_scrub_continue(struct btrfs_root *root)  {  	struct btrfs_fs_info *fs_info = root->fs_info;  	atomic_dec(&fs_info->scrub_pause_req);  	wake_up(&fs_info->scrub_pause_wait); -	return 0;  } -int btrfs_scrub_pause_super(struct btrfs_root *root) +void btrfs_scrub_pause_super(struct btrfs_root *root)  {  	down_write(&root->fs_info->scrub_super_lock); -	return 0;  } -int btrfs_scrub_continue_super(struct btrfs_root *root) +void btrfs_scrub_continue_super(struct btrfs_root *root)  {  	up_write(&root->fs_info->scrub_super_lock); -	return 0;  } -int btrfs_scrub_cancel(struct btrfs_root *root) +int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)  { -	struct btrfs_fs_info *fs_info = root->fs_info;  	mutex_lock(&fs_info->scrub_lock);  	if (!atomic_read(&fs_info->scrubs_running)) { @@ -2380,6 +2377,11 @@ int btrfs_scrub_cancel(struct btrfs_root *root)  	return 0;  } +int btrfs_scrub_cancel(struct btrfs_root *root) +{ +	return __btrfs_scrub_cancel(root->fs_info); +} +  int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)  {  	struct btrfs_fs_info *fs_info = root->fs_info; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3ce97b217cb..9db64165123 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -76,6 +76,9 @@ static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,  	case -EROFS:  		errstr = "Readonly filesystem";  		break; +	case -EEXIST: +		errstr = "Object already exists"; +		break;  	default:  		if (nbuf) {  			if (snprintf(nbuf, 16, "error %d", -errno) >= 0) @@ -116,6 +119,8 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)  	if 
(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {  		sb->s_flags |= MS_RDONLY;  		printk(KERN_INFO "btrfs is forced readonly\n"); +		__btrfs_scrub_cancel(fs_info); +//		WARN_ON(1);  	}  } @@ -124,25 +129,132 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)   * invokes the approciate error response.   */  void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, -		     unsigned int line, int errno) +		       unsigned int line, int errno, const char *fmt, ...)  {  	struct super_block *sb = fs_info->sb;  	char nbuf[16];  	const char *errstr; +	va_list args; +	va_start(args, fmt);  	/*  	 * Special case: if the error is EROFS, and we're already  	 * under MS_RDONLY, then it is safe here.  	 */  	if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) +  		return; + +  	errstr = btrfs_decode_error(fs_info, errno, nbuf); +	if (fmt) { +		struct va_format vaf = { +			.fmt = fmt, +			.va = &args, +		}; + +		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n", +			sb->s_id, function, line, errstr, &vaf); +	} else { +		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", +			sb->s_id, function, line, errstr); +	} + +	/* Don't go through full error handling during mount */ +	if (sb->s_flags & MS_BORN) { +		save_error_info(fs_info); +		btrfs_handle_error(fs_info); +	} +	va_end(args); +} + +const char *logtypes[] = { +	"emergency", +	"alert", +	"critical", +	"error", +	"warning", +	"notice", +	"info", +	"debug", +}; + +void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...) 
+{ +	struct super_block *sb = fs_info->sb; +	char lvl[4]; +	struct va_format vaf; +	va_list args; +	const char *type = logtypes[4]; + +	va_start(args, fmt); + +	if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') { +		strncpy(lvl, fmt, 3); +		fmt += 3; +		type = logtypes[fmt[1] - '0']; +	} else +		*lvl = '\0'; + +	vaf.fmt = fmt; +	vaf.va = &args; +	printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf); +} + +/* + * We only mark the transaction aborted and then set the file system read-only. + * This will prevent new transactions from starting or trying to join this + * one. + * + * This means that error recovery at the call site is limited to freeing + * any local memory allocations and passing the error code up without + * further cleanup. The transaction should complete as it normally would + * in the call path but will return -EIO. + * + * We'll complete the cleanup in btrfs_end_transaction and + * btrfs_commit_transaction. + */ +void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, +			       struct btrfs_root *root, const char *function, +			       unsigned int line, int errno) +{ +	WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted"); +	trans->aborted = errno; +	/* Nothing used. The other threads that have joined this +	 * transaction may be able to continue. */ +	if (!trans->blocks_used) { +		btrfs_printk(root->fs_info, "Aborting unused transaction.\n");  		return; +	} +	trans->transaction->aborted = errno; +	__btrfs_std_error(root->fs_info, function, line, errno, NULL); +} +/* + * __btrfs_panic decodes unexpected, fatal errors from the caller, + * issues an alert, and either panics or BUGs, depending on mount options. + */ +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, +		   unsigned int line, int errno, const char *fmt, ...) 
+{ +	char nbuf[16]; +	char *s_id = "<unknown>"; +	const char *errstr; +	struct va_format vaf = { .fmt = fmt }; +	va_list args; + +	if (fs_info) +		s_id = fs_info->sb->s_id; + +	va_start(args, fmt); +	vaf.va = &args;  	errstr = btrfs_decode_error(fs_info, errno, nbuf); -	printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", -		sb->s_id, function, line, errstr); -	save_error_info(fs_info); +	if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR) +		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", +			s_id, function, line, &vaf, errstr); -	btrfs_handle_error(fs_info); +	printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n", +	       s_id, function, line, &vaf, errstr); +	va_end(args); +	/* Caller calls BUG() */  }  static void btrfs_put_super(struct super_block *sb) @@ -166,7 +278,7 @@ enum {  	Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,  	Opt_no_space_cache, Opt_recovery, Opt_skip_balance,  	Opt_check_integrity, Opt_check_integrity_including_extent_data, -	Opt_check_integrity_print_mask, +	Opt_check_integrity_print_mask, Opt_fatal_errors,  	Opt_err,  }; @@ -206,12 +318,14 @@ static match_table_t tokens = {  	{Opt_check_integrity, "check_int"},  	{Opt_check_integrity_including_extent_data, "check_int_data"},  	{Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, +	{Opt_fatal_errors, "fatal_errors=%s"},  	{Opt_err, NULL},  };  /*   * Regular mount options parser.  Everything that is needed only when   * reading in a new superblock is parsed here. + * XXX JDM: This needs to be cleaned up for remount.   
*/  int btrfs_parse_options(struct btrfs_root *root, char *options)  { @@ -438,6 +552,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)  			ret = -EINVAL;  			goto out;  #endif +		case Opt_fatal_errors: +			if (strcmp(args[0].from, "panic") == 0) +				btrfs_set_opt(info->mount_opt, +					      PANIC_ON_FATAL_ERROR); +			else if (strcmp(args[0].from, "bug") == 0) +				btrfs_clear_opt(info->mount_opt, +					      PANIC_ON_FATAL_ERROR); +			else { +				ret = -EINVAL; +				goto out; +			} +			break;  		case Opt_err:  			printk(KERN_INFO "btrfs: unrecognized mount option "  			       "'%s'\n", p); @@ -766,6 +892,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)  		seq_puts(seq, ",inode_cache");  	if (btrfs_test_opt(root, SKIP_BALANCE))  		seq_puts(seq, ",skip_balance"); +	if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR)) +		seq_puts(seq, ",fatal_errors=panic");  	return 0;  } @@ -999,11 +1127,20 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)  {  	struct btrfs_fs_info *fs_info = btrfs_sb(sb);  	struct btrfs_root *root = fs_info->tree_root; +	unsigned old_flags = sb->s_flags; +	unsigned long old_opts = fs_info->mount_opt; +	unsigned long old_compress_type = fs_info->compress_type; +	u64 old_max_inline = fs_info->max_inline; +	u64 old_alloc_start = fs_info->alloc_start; +	int old_thread_pool_size = fs_info->thread_pool_size; +	unsigned int old_metadata_ratio = fs_info->metadata_ratio;  	int ret;  	ret = btrfs_parse_options(root, data); -	if (ret) -		return -EINVAL; +	if (ret) { +		ret = -EINVAL; +		goto restore; +	}  	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))  		return 0; @@ -1011,26 +1148,44 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)  	if (*flags & MS_RDONLY) {  		sb->s_flags |= MS_RDONLY; -		ret =  btrfs_commit_super(root); -		WARN_ON(ret); +		ret = btrfs_commit_super(root); +		if (ret) +			goto restore;  	} else {  		if 
(fs_info->fs_devices->rw_devices == 0) -			return -EACCES; +			ret = -EACCES; +			goto restore;  		if (btrfs_super_log_root(fs_info->super_copy) != 0) -			return -EINVAL; +			ret = -EINVAL; +			goto restore;  		ret = btrfs_cleanup_fs_roots(fs_info); -		WARN_ON(ret); +		if (ret) +			goto restore;  		/* recover relocation */  		ret = btrfs_recover_relocation(root); -		WARN_ON(ret); +		if (ret) +			goto restore;  		sb->s_flags &= ~MS_RDONLY;  	}  	return 0; + +restore: +	/* We've hit an error - don't reset MS_RDONLY */ +	if (sb->s_flags & MS_RDONLY) +		old_flags |= MS_RDONLY; +	sb->s_flags = old_flags; +	fs_info->mount_opt = old_opts; +	fs_info->compress_type = old_compress_type; +	fs_info->max_inline = old_max_inline; +	fs_info->alloc_start = old_alloc_start; +	fs_info->thread_pool_size = old_thread_pool_size; +	fs_info->metadata_ratio = old_metadata_ratio; +	return ret;  }  /* Used to sort the devices by max_avail(descending sort) */ @@ -1360,9 +1515,7 @@ static int __init init_btrfs_fs(void)  	if (err)  		return err; -	err = btrfs_init_compress(); -	if (err) -		goto free_sysfs; +	btrfs_init_compress();  	err = btrfs_init_cachep();  	if (err) @@ -1403,7 +1556,6 @@ free_cachep:  	btrfs_destroy_cachep();  free_compress:  	btrfs_exit_compress(); -free_sysfs:  	btrfs_exit_sysfs();  	return err;  } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 04b77e3ceb7..63f835aa978 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -31,7 +31,7 @@  #define BTRFS_ROOT_TRANS_TAG 0 -static noinline void put_transaction(struct btrfs_transaction *transaction) +void put_transaction(struct btrfs_transaction *transaction)  {  	WARN_ON(atomic_read(&transaction->use_count) == 0);  	if (atomic_dec_and_test(&transaction->use_count)) { @@ -58,6 +58,12 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)  	spin_lock(&root->fs_info->trans_lock);  loop: +	/* The file system has been taken offline. No new transactions. 
*/ +	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { +		spin_unlock(&root->fs_info->trans_lock); +		return -EROFS; +	} +  	if (root->fs_info->trans_no_join) {  		if (!nofail) {  			spin_unlock(&root->fs_info->trans_lock); @@ -67,6 +73,8 @@ loop:  	cur_trans = root->fs_info->running_transaction;  	if (cur_trans) { +		if (cur_trans->aborted) +			return cur_trans->aborted;  		atomic_inc(&cur_trans->use_count);  		atomic_inc(&cur_trans->num_writers);  		cur_trans->num_joined++; @@ -123,6 +131,7 @@ loop:  	root->fs_info->generation++;  	cur_trans->transid = root->fs_info->generation;  	root->fs_info->running_transaction = cur_trans; +	cur_trans->aborted = 0;  	spin_unlock(&root->fs_info->trans_lock);  	return 0; @@ -318,6 +327,7 @@ again:  	h->use_count = 1;  	h->block_rsv = NULL;  	h->orig_rsv = NULL; +	h->aborted = 0;  	smp_mb();  	if (cur_trans->blocked && may_wait_transaction(root, type)) { @@ -440,6 +450,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,  	struct btrfs_transaction *cur_trans = trans->transaction;  	struct btrfs_block_rsv *rsv = trans->block_rsv;  	int updates; +	int err;  	smp_mb();  	if (cur_trans->blocked || cur_trans->delayed_refs.flushing) @@ -453,8 +464,11 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,  	updates = trans->delayed_ref_updates;  	trans->delayed_ref_updates = 0; -	if (updates) -		btrfs_run_delayed_refs(trans, root, updates); +	if (updates) { +		err = btrfs_run_delayed_refs(trans, root, updates); +		if (err) /* Error code will also eval true */ +			return err; +	}  	trans->block_rsv = rsv; @@ -525,6 +539,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,  	if (throttle)  		btrfs_run_delayed_iputs(root); +	if (trans->aborted || +	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { +		return -EIO; +	} +  	return 0;  } @@ -690,11 +709,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,  		ret = btrfs_update_root(trans, tree_root,  					
&root->root_key,  					&root->root_item); -		BUG_ON(ret); +		if (ret) +			return ret;  		old_root_used = btrfs_root_used(&root->root_item);  		ret = btrfs_write_dirty_block_groups(trans, root); -		BUG_ON(ret); +		if (ret) +			return ret;  	}  	if (root != root->fs_info->extent_root) @@ -705,6 +726,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,  /*   * update all the cowonly tree roots on disk + * + * The error handling in this function may not be obvious. Any of the + * failures will cause the file system to go offline. We still need + * to clean up the delayed refs.   */  static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,  					 struct btrfs_root *root) @@ -715,22 +740,30 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,  	int ret;  	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); -	BUG_ON(ret); +	if (ret) +		return ret;  	eb = btrfs_lock_root_node(fs_info->tree_root); -	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb); +	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, +			      0, &eb);  	btrfs_tree_unlock(eb);  	free_extent_buffer(eb); +	if (ret) +		return ret; +  	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); -	BUG_ON(ret); +	if (ret) +		return ret;  	while (!list_empty(&fs_info->dirty_cowonly_roots)) {  		next = fs_info->dirty_cowonly_roots.next;  		list_del_init(next);  		root = list_entry(next, struct btrfs_root, dirty_list); -		update_cowonly_root(trans, root); +		ret = update_cowonly_root(trans, root); +		if (ret) +			return ret;  	}  	down_write(&fs_info->extent_commit_sem); @@ -874,7 +907,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);  	if (!new_root_item) { -		pending->error = -ENOMEM; +		ret = pending->error = -ENOMEM;  		goto fail;  	} @@ -911,21 +944,24 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle 
*trans,  	 * insert the directory item  	 */  	ret = btrfs_set_inode_index(parent_inode, &index); -	BUG_ON(ret); +	BUG_ON(ret); /* -ENOMEM */  	ret = btrfs_insert_dir_item(trans, parent_root,  				dentry->d_name.name, dentry->d_name.len,  				parent_inode, &key,  				BTRFS_FT_DIR, index); -	if (ret) { +	if (ret == -EEXIST) {  		pending->error = -EEXIST;  		dput(parent);  		goto fail; +	} else if (ret) { +		goto abort_trans_dput;  	}  	btrfs_i_size_write(parent_inode, parent_inode->i_size +  					 dentry->d_name.len * 2);  	ret = btrfs_update_inode(trans, parent_root, parent_inode); -	BUG_ON(ret); +	if (ret) +		goto abort_trans_dput;  	/*  	 * pull in the delayed directory update @@ -934,7 +970,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  	 * snapshot  	 */  	ret = btrfs_run_delayed_items(trans, root); -	BUG_ON(ret); +	if (ret) { /* Transaction aborted */ +		dput(parent); +		goto fail; +	}  	record_root_in_trans(trans, root);  	btrfs_set_root_last_snapshot(&root->root_item, trans->transid); @@ -949,12 +988,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  	btrfs_set_root_flags(new_root_item, root_flags);  	old = btrfs_lock_root_node(root); -	btrfs_cow_block(trans, root, old, NULL, 0, &old); +	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old); +	if (ret) { +		btrfs_tree_unlock(old); +		free_extent_buffer(old); +		goto abort_trans_dput; +	} +  	btrfs_set_lock_blocking(old); -	btrfs_copy_root(trans, root, old, &tmp, objectid); +	ret = btrfs_copy_root(trans, root, old, &tmp, objectid); +	/* clean up in any case */  	btrfs_tree_unlock(old);  	free_extent_buffer(old); +	if (ret) +		goto abort_trans_dput;  	/* see comments in should_cow_block() */  	root->force_cow = 1; @@ -966,7 +1014,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);  	btrfs_tree_unlock(tmp);  	free_extent_buffer(tmp); -	
BUG_ON(ret); +	if (ret) +		goto abort_trans_dput;  	/*  	 * insert root back/forward references @@ -975,19 +1024,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  				 parent_root->root_key.objectid,  				 btrfs_ino(parent_inode), index,  				 dentry->d_name.name, dentry->d_name.len); -	BUG_ON(ret);  	dput(parent); +	if (ret) +		goto fail;  	key.offset = (u64)-1;  	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); -	BUG_ON(IS_ERR(pending->snap)); +	if (IS_ERR(pending->snap)) { +		ret = PTR_ERR(pending->snap); +		goto abort_trans; +	} -	btrfs_reloc_post_snapshot(trans, pending); +	ret = btrfs_reloc_post_snapshot(trans, pending); +	if (ret) +		goto abort_trans; +	ret = 0;  fail:  	kfree(new_root_item);  	trans->block_rsv = rsv;  	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); -	return 0; +	return ret; + +abort_trans_dput: +	dput(parent); +abort_trans: +	btrfs_abort_transaction(trans, root, ret); +	goto fail;  }  /* @@ -1124,6 +1186,33 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,  	return 0;  } + +static void cleanup_transaction(struct btrfs_trans_handle *trans, +				struct btrfs_root *root) +{ +	struct btrfs_transaction *cur_trans = trans->transaction; + +	WARN_ON(trans->use_count > 1); + +	spin_lock(&root->fs_info->trans_lock); +	list_del_init(&cur_trans->list); +	spin_unlock(&root->fs_info->trans_lock); + +	btrfs_cleanup_one_transaction(trans->transaction, root); + +	put_transaction(cur_trans); +	put_transaction(cur_trans); + +	trace_btrfs_transaction_commit(root); + +	btrfs_scrub_continue(root); + +	if (current->journal_info == trans) +		current->journal_info = NULL; + +	kmem_cache_free(btrfs_trans_handle_cachep, trans); +} +  /*   * btrfs_transaction state sequence:   *    in_commit = 0, blocked = 0  (initial) @@ -1135,10 +1224,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  			     struct btrfs_root *root)  {  	unsigned long joined = 0; -	struct 
btrfs_transaction *cur_trans; +	struct btrfs_transaction *cur_trans = trans->transaction;  	struct btrfs_transaction *prev_trans = NULL;  	DEFINE_WAIT(wait); -	int ret; +	int ret = -EIO;  	int should_grow = 0;  	unsigned long now = get_seconds();  	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT); @@ -1148,13 +1237,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	btrfs_trans_release_metadata(trans, root);  	trans->block_rsv = NULL; +	if (cur_trans->aborted) +		goto cleanup_transaction; +  	/* make a pass through all the delayed refs we have so far  	 * any runnings procs may add more while we are here  	 */  	ret = btrfs_run_delayed_refs(trans, root, 0); -	BUG_ON(ret); +	if (ret) +		goto cleanup_transaction;  	cur_trans = trans->transaction; +  	/*  	 * set the flushing flag so procs in this transaction have to  	 * start sending their work down. @@ -1162,19 +1256,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	cur_trans->delayed_refs.flushing = 1;  	ret = btrfs_run_delayed_refs(trans, root, 0); -	BUG_ON(ret); +	if (ret) +		goto cleanup_transaction;  	spin_lock(&cur_trans->commit_lock);  	if (cur_trans->in_commit) {  		spin_unlock(&cur_trans->commit_lock);  		atomic_inc(&cur_trans->use_count); -		btrfs_end_transaction(trans, root); +		ret = btrfs_end_transaction(trans, root);  		wait_for_commit(root, cur_trans);  		put_transaction(cur_trans); -		return 0; +		return ret;  	}  	trans->transaction->in_commit = 1; @@ -1214,12 +1309,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  		if (flush_on_commit || snap_pending) {  			btrfs_start_delalloc_inodes(root, 1); -			ret = btrfs_wait_ordered_extents(root, 0, 1); -			BUG_ON(ret); +			btrfs_wait_ordered_extents(root, 0, 1);  		}  		ret = btrfs_run_delayed_items(trans, root); -		BUG_ON(ret); +		if (ret) +			goto cleanup_transaction;  		/*  		 * rename don't use btrfs_join_transaction, so, once we @@ -1261,13 +1356,22 @@ int 
btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	mutex_lock(&root->fs_info->reloc_mutex);  	ret = btrfs_run_delayed_items(trans, root); -	BUG_ON(ret); +	if (ret) { +		mutex_unlock(&root->fs_info->reloc_mutex); +		goto cleanup_transaction; +	}  	ret = create_pending_snapshots(trans, root->fs_info); -	BUG_ON(ret); +	if (ret) { +		mutex_unlock(&root->fs_info->reloc_mutex); +		goto cleanup_transaction; +	}  	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); -	BUG_ON(ret); +	if (ret) { +		mutex_unlock(&root->fs_info->reloc_mutex); +		goto cleanup_transaction; +	}  	/*  	 * make sure none of the code above managed to slip in a @@ -1294,7 +1398,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	mutex_lock(&root->fs_info->tree_log_mutex);  	ret = commit_fs_roots(trans, root); -	BUG_ON(ret); +	if (ret) { +		mutex_unlock(&root->fs_info->tree_log_mutex); +		goto cleanup_transaction; +	}  	/* commit_fs_roots gets rid of all the tree log roots, it is now  	 * safe to free the root of tree log roots @@ -1302,7 +1409,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	btrfs_free_log_root_tree(trans, root->fs_info);  	ret = commit_cowonly_roots(trans, root); -	BUG_ON(ret); +	if (ret) { +		mutex_unlock(&root->fs_info->tree_log_mutex); +		goto cleanup_transaction; +	}  	btrfs_prepare_extent_commit(trans, root); @@ -1336,8 +1446,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	wake_up(&root->fs_info->transaction_wait);  	ret = btrfs_write_and_wait_transaction(trans, root); -	BUG_ON(ret); -	write_ctree_super(trans, root, 0); +	if (ret) { +		btrfs_error(root->fs_info, ret, +			    "Error while writing out transaction."); +		mutex_unlock(&root->fs_info->tree_log_mutex); +		goto cleanup_transaction; +	} + +	ret = write_ctree_super(trans, root, 0); +	if (ret) { +		mutex_unlock(&root->fs_info->tree_log_mutex); +		goto cleanup_transaction; +	}  	/*  	 * the super is written, we can safely allow the 
tree-loggers @@ -1373,6 +1493,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  		btrfs_run_delayed_iputs(root);  	return ret; + +cleanup_transaction: +	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n"); +//	WARN_ON(1); +	if (current->journal_info == trans) +		current->journal_info = NULL; +	cleanup_transaction(trans, root); + +	return ret;  }  /* @@ -1388,6 +1517,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)  	spin_unlock(&fs_info->trans_lock);  	while (!list_empty(&list)) { +		int ret; +  		root = list_entry(list.next, struct btrfs_root, root_list);  		list_del(&root->root_list); @@ -1395,9 +1526,10 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)  		if (btrfs_header_backref_rev(root->node) <  		    BTRFS_MIXED_BACKREF_REV) -			btrfs_drop_snapshot(root, NULL, 0, 0); +			ret = btrfs_drop_snapshot(root, NULL, 0, 0);  		else -			btrfs_drop_snapshot(root, NULL, 1, 0); +			ret =btrfs_drop_snapshot(root, NULL, 1, 0); +		BUG_ON(ret < 0);  	}  	return 0;  } diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 02564e6230a..fe27379e368 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -43,6 +43,7 @@ struct btrfs_transaction {  	wait_queue_head_t commit_wait;  	struct list_head pending_snapshots;  	struct btrfs_delayed_ref_root delayed_refs; +	int aborted;  };  struct btrfs_trans_handle { @@ -55,6 +56,7 @@ struct btrfs_trans_handle {  	struct btrfs_transaction *transaction;  	struct btrfs_block_rsv *block_rsv;  	struct btrfs_block_rsv *orig_rsv; +	int aborted;  };  struct btrfs_pending_snapshot { @@ -114,4 +116,5 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,  				struct extent_io_tree *dirty_pages, int mark);  int btrfs_transaction_blocked(struct btrfs_fs_info *info);  int btrfs_transaction_in_commit(struct btrfs_fs_info *info); +void put_transaction(struct btrfs_transaction *transaction);  #endif diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 
966cc74f5d6..d017283ae6f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -212,14 +212,13 @@ int btrfs_pin_log_trans(struct btrfs_root *root)   * indicate we're done making changes to the log tree   * and wake up anyone waiting to do a sync   */ -int btrfs_end_log_trans(struct btrfs_root *root) +void btrfs_end_log_trans(struct btrfs_root *root)  {  	if (atomic_dec_and_test(&root->log_writers)) {  		smp_mb();  		if (waitqueue_active(&root->log_writer_wait))  			wake_up(&root->log_writer_wait);  	} -	return 0;  } @@ -378,12 +377,11 @@ insert:  		u32 found_size;  		found_size = btrfs_item_size_nr(path->nodes[0],  						path->slots[0]); -		if (found_size > item_size) { +		if (found_size > item_size)  			btrfs_truncate_item(trans, root, path, item_size, 1); -		} else if (found_size < item_size) { -			ret = btrfs_extend_item(trans, root, path, -						item_size - found_size); -		} +		else if (found_size < item_size) +			btrfs_extend_item(trans, root, path, +					  item_size - found_size);  	} else if (ret) {  		return ret;  	} @@ -1763,7 +1761,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,  					BTRFS_TREE_LOG_OBJECTID);  				ret = btrfs_free_and_pin_reserved_extent(root,  							 bytenr, blocksize); -				BUG_ON(ret); +				BUG_ON(ret); /* -ENOMEM or logic errors */  			}  			free_extent_buffer(next);  			continue; @@ -1871,20 +1869,26 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,  		wret = walk_down_log_tree(trans, log, path, &level, wc);  		if (wret > 0)  			break; -		if (wret < 0) +		if (wret < 0) {  			ret = wret; +			goto out; +		}  		wret = walk_up_log_tree(trans, log, path, &level, wc);  		if (wret > 0)  			break; -		if (wret < 0) +		if (wret < 0) {  			ret = wret; +			goto out; +		}  	}  	/* was the root node processed? 
if not, catch it here */  	if (path->nodes[orig_level]) { -		wc->process_func(log, path->nodes[orig_level], wc, +		ret = wc->process_func(log, path->nodes[orig_level], wc,  			 btrfs_header_generation(path->nodes[orig_level])); +		if (ret) +			goto out;  		if (wc->free) {  			struct extent_buffer *next; @@ -1900,10 +1904,11 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,  				BTRFS_TREE_LOG_OBJECTID);  			ret = btrfs_free_and_pin_reserved_extent(log, next->start,  							 next->len); -			BUG_ON(ret); +			BUG_ON(ret); /* -ENOMEM or logic errors */  		}  	} +out:  	for (i = 0; i <= orig_level; i++) {  		if (path->nodes[i]) {  			free_extent_buffer(path->nodes[i]); @@ -1963,8 +1968,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,  	return 0;  } -static int wait_for_writer(struct btrfs_trans_handle *trans, -			   struct btrfs_root *root) +static void wait_for_writer(struct btrfs_trans_handle *trans, +			    struct btrfs_root *root)  {  	DEFINE_WAIT(wait);  	while (root->fs_info->last_trans_log_full_commit != @@ -1978,7 +1983,6 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,  		mutex_lock(&root->log_mutex);  		finish_wait(&root->log_writer_wait, &wait);  	} -	return 0;  }  /* @@ -2046,7 +2050,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,  	 * wait for them until later.  	 
*/  	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		mutex_unlock(&root->log_mutex); +		goto out; +	}  	btrfs_set_root_node(&log->root_item, log->node); @@ -2077,7 +2085,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,  	}  	if (ret) { -		BUG_ON(ret != -ENOSPC); +		if (ret != -ENOSPC) { +			btrfs_abort_transaction(trans, root, ret); +			mutex_unlock(&log_root_tree->log_mutex); +			goto out; +		}  		root->fs_info->last_trans_log_full_commit = trans->transid;  		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);  		mutex_unlock(&log_root_tree->log_mutex); @@ -2117,7 +2129,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,  	ret = btrfs_write_and_wait_marked_extents(log_root_tree,  				&log_root_tree->dirty_log_pages,  				EXTENT_DIRTY | EXTENT_NEW); -	BUG_ON(ret); +	if (ret) { +		btrfs_abort_transaction(trans, root, ret); +		mutex_unlock(&log_root_tree->log_mutex); +		goto out_wake_log_root; +	}  	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);  	btrfs_set_super_log_root(root->fs_info->super_for_commit, @@ -2326,7 +2342,9 @@ out_unlock:  	if (ret == -ENOSPC) {  		root->fs_info->last_trans_log_full_commit = trans->transid;  		ret = 0; -	} +	} else if (ret < 0) +		btrfs_abort_transaction(trans, root, ret); +  	btrfs_end_log_trans(root);  	return err; @@ -2357,7 +2375,8 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,  	if (ret == -ENOSPC) {  		root->fs_info->last_trans_log_full_commit = trans->transid;  		ret = 0; -	} +	} else if (ret < 0 && ret != -ENOENT) +		btrfs_abort_transaction(trans, root, ret);  	btrfs_end_log_trans(root);  	return ret; @@ -3169,13 +3188,20 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)  	fs_info->log_root_recovering = 1;  	trans = btrfs_start_transaction(fs_info->tree_root, 0); -	BUG_ON(IS_ERR(trans)); +	if (IS_ERR(trans)) { +		ret = PTR_ERR(trans); +		goto error; +	}  	
wc.trans = trans;  	wc.pin = 1;  	ret = walk_log_tree(trans, log_root_tree, &wc); -	BUG_ON(ret); +	if (ret) { +		btrfs_error(fs_info, ret, "Failed to pin buffers while " +			    "recovering log root tree."); +		goto error; +	}  again:  	key.objectid = BTRFS_TREE_LOG_OBJECTID; @@ -3184,8 +3210,12 @@ again:  	while (1) {  		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); -		if (ret < 0) -			break; + +		if (ret < 0) { +			btrfs_error(fs_info, ret, +				    "Couldn't find tree log root."); +			goto error; +		}  		if (ret > 0) {  			if (path->slots[0] == 0)  				break; @@ -3199,14 +3229,24 @@ again:  		log = btrfs_read_fs_root_no_radix(log_root_tree,  						  &found_key); -		BUG_ON(IS_ERR(log)); +		if (IS_ERR(log)) { +			ret = PTR_ERR(log); +			btrfs_error(fs_info, ret, +				    "Couldn't read tree log root."); +			goto error; +		}  		tmp_key.objectid = found_key.offset;  		tmp_key.type = BTRFS_ROOT_ITEM_KEY;  		tmp_key.offset = (u64)-1;  		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); -		BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); +		if (IS_ERR(wc.replay_dest)) { +			ret = PTR_ERR(wc.replay_dest); +			btrfs_error(fs_info, ret, "Couldn't read target root " +				    "for tree log recovery."); +			goto error; +		}  		wc.replay_dest->log_root = log;  		btrfs_record_root_in_trans(trans, wc.replay_dest); @@ -3254,6 +3294,10 @@ again:  	kfree(log_root_tree);  	return 0; + +error: +	btrfs_free_path(path); +	return ret;  }  /* diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 2270ac58d74..862ac813f6b 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -38,7 +38,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,  			       struct btrfs_root *root,  			       const char *name, int name_len,  			       struct inode *inode, u64 dirid); -int btrfs_end_log_trans(struct btrfs_root *root); +void btrfs_end_log_trans(struct btrfs_root *root);  int btrfs_pin_log_trans(struct btrfs_root *root);  int 
btrfs_log_inode_parent(struct btrfs_trans_handle *trans,  		    struct btrfs_root *root, struct inode *inode, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 58aad63e1ad..d64cd6cbdbb 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -67,7 +67,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)  	kfree(fs_devices);  } -int btrfs_cleanup_fs_uuids(void) +void btrfs_cleanup_fs_uuids(void)  {  	struct btrfs_fs_devices *fs_devices; @@ -77,7 +77,6 @@ int btrfs_cleanup_fs_uuids(void)  		list_del(&fs_devices->list);  		free_fs_devices(fs_devices);  	} -	return 0;  }  static noinline struct btrfs_device *__find_device(struct list_head *head, @@ -130,7 +129,7 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,   * the list if the block device is congested.  This way, multiple devices   * can make progress from a single worker thread.   */ -static noinline int run_scheduled_bios(struct btrfs_device *device) +static noinline void run_scheduled_bios(struct btrfs_device *device)  {  	struct bio *pending;  	struct backing_dev_info *bdi; @@ -316,7 +315,6 @@ loop_lock:  done:  	blk_finish_plug(&plug); -	return 0;  }  static void pending_bios_fn(struct btrfs_work *work) @@ -455,7 +453,7 @@ error:  	return ERR_PTR(-ENOMEM);  } -int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) +void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)  {  	struct btrfs_device *device, *next; @@ -503,7 +501,6 @@ again:  	fs_devices->latest_trans = latest_transid;  	mutex_unlock(&uuid_mutex); -	return 0;  }  static void __free_device(struct work_struct *work) @@ -552,10 +549,10 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)  			fs_devices->num_can_discard--;  		new_device = kmalloc(sizeof(*new_device), GFP_NOFS); -		BUG_ON(!new_device); +		BUG_ON(!new_device); /* -ENOMEM */  		memcpy(new_device, device, sizeof(*new_device));  		new_device->name = kstrdup(device->name, GFP_NOFS); -		
BUG_ON(device->name && !new_device->name); +		BUG_ON(device->name && !new_device->name); /* -ENOMEM */  		new_device->bdev = NULL;  		new_device->writeable = 0;  		new_device->in_fs_metadata = 0; @@ -1039,8 +1036,10 @@ again:  		leaf = path->nodes[0];  		extent = btrfs_item_ptr(leaf, path->slots[0],  					struct btrfs_dev_extent); +	} else { +		btrfs_error(root->fs_info, ret, "Slot search failed"); +		goto out;  	} -	BUG_ON(ret);  	if (device->bytes_used > 0) {  		u64 len = btrfs_dev_extent_length(leaf, extent); @@ -1050,7 +1049,10 @@ again:  		spin_unlock(&root->fs_info->free_chunk_lock);  	}  	ret = btrfs_del_item(trans, root, path); - +	if (ret) { +		btrfs_error(root->fs_info, ret, +			    "Failed to remove dev extent item"); +	}  out:  	btrfs_free_path(path);  	return ret; @@ -1078,7 +1080,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,  	key.type = BTRFS_DEV_EXTENT_KEY;  	ret = btrfs_insert_empty_item(trans, root, path, &key,  				      sizeof(*extent)); -	BUG_ON(ret); +	if (ret) +		goto out;  	leaf = path->nodes[0];  	extent = btrfs_item_ptr(leaf, path->slots[0], @@ -1093,6 +1096,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,  	btrfs_set_dev_extent_length(leaf, extent, num_bytes);  	btrfs_mark_buffer_dirty(leaf); +out:  	btrfs_free_path(path);  	return ret;  } @@ -1118,7 +1122,7 @@ static noinline int find_next_chunk(struct btrfs_root *root,  	if (ret < 0)  		goto error; -	BUG_ON(ret == 0); +	BUG_ON(ret == 0); /* Corruption */  	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);  	if (ret) { @@ -1162,7 +1166,7 @@ static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)  	if (ret < 0)  		goto error; -	BUG_ON(ret == 0); +	BUG_ON(ret == 0); /* Corruption */  	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,  				  BTRFS_DEV_ITEM_KEY); @@ -1596,7 +1600,7 @@ next_slot:  				   (unsigned long)btrfs_device_fsid(dev_item),  				   BTRFS_UUID_SIZE);  		device = btrfs_find_device(root, 
devid, dev_uuid, fs_uuid); -		BUG_ON(!device); +		BUG_ON(!device); /* Logic error */  		if (device->fs_devices->seeding) {  			btrfs_set_device_generation(leaf, dev_item, @@ -1706,7 +1710,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)  	if (seeding_dev) {  		sb->s_flags &= ~MS_RDONLY;  		ret = btrfs_prepare_sprout(root); -		BUG_ON(ret); +		BUG_ON(ret); /* -ENOMEM */  	}  	device->fs_devices = root->fs_info->fs_devices; @@ -1744,11 +1748,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)  	if (seeding_dev) {  		ret = init_first_rw_device(trans, root, device); -		BUG_ON(ret); +		if (ret) +			goto error_trans;  		ret = btrfs_finish_sprout(trans, root); -		BUG_ON(ret); +		if (ret) +			goto error_trans;  	} else {  		ret = btrfs_add_device(trans, root, device); +		if (ret) +			goto error_trans;  	}  	/* @@ -1758,17 +1766,31 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)  	btrfs_clear_space_info_full(root->fs_info);  	unlock_chunks(root); -	btrfs_commit_transaction(trans, root); +	ret = btrfs_commit_transaction(trans, root);  	if (seeding_dev) {  		mutex_unlock(&uuid_mutex);  		up_write(&sb->s_umount); +		if (ret) /* transaction commit */ +			return ret; +  		ret = btrfs_relocate_sys_chunks(root); -		BUG_ON(ret); +		if (ret < 0) +			btrfs_error(root->fs_info, ret, +				    "Failed to relocate sys chunks after " +				    "device initialization. 
This can be fixed " +				    "using the \"btrfs balance\" command.");  	}  	return ret; + +error_trans: +	unlock_chunks(root); +	btrfs_abort_transaction(trans, root, ret); +	btrfs_end_transaction(trans, root); +	kfree(device->name); +	kfree(device);  error:  	blkdev_put(bdev, FMODE_EXCL);  	if (seeding_dev) { @@ -1876,10 +1898,20 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,  	key.type = BTRFS_CHUNK_ITEM_KEY;  	ret = btrfs_search_slot(trans, root, &key, path, -1, 1); -	BUG_ON(ret); +	if (ret < 0) +		goto out; +	else if (ret > 0) { /* Logic error or corruption */ +		btrfs_error(root->fs_info, -ENOENT, +			    "Failed lookup while freeing chunk."); +		ret = -ENOENT; +		goto out; +	}  	ret = btrfs_del_item(trans, root, path); - +	if (ret < 0) +		btrfs_error(root->fs_info, ret, +			    "Failed to delete chunk item."); +out:  	btrfs_free_path(path);  	return ret;  } @@ -2041,7 +2073,7 @@ again:  		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);  		if (ret < 0)  			goto error; -		BUG_ON(ret == 0); +		BUG_ON(ret == 0); /* Corruption */  		ret = btrfs_previous_item(chunk_root, path, key.objectid,  					  key.type); @@ -3328,13 +3360,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,  	write_lock(&em_tree->lock);  	ret = add_extent_mapping(em_tree, em);  	write_unlock(&em_tree->lock); -	BUG_ON(ret);  	free_extent_map(em); +	if (ret) +		goto error;  	ret = btrfs_make_block_group(trans, extent_root, 0, type,  				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,  				     start, num_bytes); -	BUG_ON(ret); +	if (ret) +		goto error;  	for (i = 0; i < map->num_stripes; ++i) {  		struct btrfs_device *device; @@ -3347,7 +3381,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,  				info->chunk_root->root_key.objectid,  				BTRFS_FIRST_CHUNK_TREE_OBJECTID,  				start, dev_offset, stripe_size); -		BUG_ON(ret); +		if (ret) { +			btrfs_abort_transaction(trans, extent_root, ret); +			goto error; +		}  	}  	kfree(devices_info); 
@@ -3383,7 +3420,8 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,  		device = map->stripes[index].dev;  		device->bytes_used += stripe_size;  		ret = btrfs_update_device(trans, device); -		BUG_ON(ret); +		if (ret) +			goto out_free;  		index++;  	} @@ -3420,16 +3458,19 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,  	key.offset = chunk_offset;  	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); -	BUG_ON(ret); -	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { +	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { +		/* +		 * TODO: Cleanup of inserted chunk root in case of +		 * failure. +		 */  		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,  					     item_size); -		BUG_ON(ret);  	} +out_free:  	kfree(chunk); -	return 0; +	return ret;  }  /* @@ -3461,7 +3502,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,  	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,  				   chunk_size, stripe_size); -	BUG_ON(ret); +	if (ret) +		return ret;  	return 0;  } @@ -3493,7 +3535,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,  	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,  				  &stripe_size, chunk_offset, alloc_profile); -	BUG_ON(ret); +	if (ret) +		return ret;  	sys_chunk_offset = chunk_offset + chunk_size; @@ -3504,10 +3547,12 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,  	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,  				  &sys_chunk_size, &sys_stripe_size,  				  sys_chunk_offset, alloc_profile); -	BUG_ON(ret); +	if (ret) +		goto abort;  	ret = btrfs_add_device(trans, fs_info->chunk_root, device); -	BUG_ON(ret); +	if (ret) +		goto abort;  	/*  	 * Modifying chunk tree needs allocating new blocks from both @@ -3517,13 +3562,20 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,  	 */  	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,  				   
chunk_size, stripe_size); -	BUG_ON(ret); +	if (ret) +		goto abort;  	ret = __finish_chunk_alloc(trans, extent_root, sys_map,  				   sys_chunk_offset, sys_chunk_size,  				   sys_stripe_size); -	BUG_ON(ret); +	if (ret) +		goto abort; +  	return 0; + +abort: +	btrfs_abort_transaction(trans, root, ret); +	return ret;  }  int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) @@ -3874,7 +3926,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,  		do_div(length, map->num_stripes);  	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); -	BUG_ON(!buf); +	BUG_ON(!buf); /* -ENOMEM */  	for (i = 0; i < map->num_stripes; i++) {  		if (devid && map->stripes[i].dev->devid != devid) @@ -3967,7 +4019,7 @@ struct async_sched {   * This will add one bio to the pending list for a device and make sure   * the work struct is scheduled.   */ -static noinline int schedule_bio(struct btrfs_root *root, +static noinline void schedule_bio(struct btrfs_root *root,  				 struct btrfs_device *device,  				 int rw, struct bio *bio)  { @@ -3979,7 +4031,7 @@ static noinline int schedule_bio(struct btrfs_root *root,  		bio_get(bio);  		btrfsic_submit_bio(rw, bio);  		bio_put(bio); -		return 0; +		return;  	}  	/* @@ -4013,7 +4065,6 @@ static noinline int schedule_bio(struct btrfs_root *root,  	if (should_queue)  		btrfs_queue_worker(&root->fs_info->submit_workers,  				   &device->work); -	return 0;  }  int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, @@ -4036,7 +4087,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,  	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,  			      mirror_num); -	BUG_ON(ret); +	if (ret) /* -ENOMEM */ +		return ret;  	total_devs = bbio->num_stripes;  	if (map_length < length) { @@ -4055,7 +4107,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,  	while (dev_nr < total_devs) {  		if (dev_nr < total_devs - 1) {  			bio = bio_clone(first_bio, GFP_NOFS); -			
BUG_ON(!bio); +			BUG_ON(!bio); /* -ENOMEM */  		} else {  			bio = first_bio;  		} @@ -4209,13 +4261,13 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,  	write_lock(&map_tree->map_tree.lock);  	ret = add_extent_mapping(&map_tree->map_tree, em);  	write_unlock(&map_tree->map_tree.lock); -	BUG_ON(ret); +	BUG_ON(ret); /* Tree corruption */  	free_extent_map(em);  	return 0;  } -static int fill_device_from_item(struct extent_buffer *leaf, +static void fill_device_from_item(struct extent_buffer *leaf,  				 struct btrfs_dev_item *dev_item,  				 struct btrfs_device *device)  { @@ -4232,8 +4284,6 @@ static int fill_device_from_item(struct extent_buffer *leaf,  	ptr = (unsigned long)btrfs_device_uuid(dev_item);  	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); - -	return 0;  }  static int open_seed_devices(struct btrfs_root *root, u8 *fsid) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 19ac95048b8..bb6b03f97aa 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -260,12 +260,12 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,  int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,  			  struct btrfs_fs_devices **fs_devices_ret);  int btrfs_close_devices(struct btrfs_fs_devices *fs_devices); -int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices); +void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);  int btrfs_add_device(struct btrfs_trans_handle *trans,  		     struct btrfs_root *root,  		     struct btrfs_device *device);  int btrfs_rm_device(struct btrfs_root *root, char *device_path); -int btrfs_cleanup_fs_uuids(void); +void btrfs_cleanup_fs_uuids(void);  int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);  int btrfs_grow_device(struct btrfs_trans_handle *trans,  		      struct btrfs_device *device, u64 new_size);  |