Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	284
1 file changed, 186 insertions, 98 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5a3327b8f90..5cd44e23959 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -72,8 +72,7 @@ enum {
 	RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
-static int update_block_group(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
@@ -103,6 +102,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
 			    int dump_block_groups);
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
 				       u64 num_bytes, int reserve);
+static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+			       u64 num_bytes);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -162,6 +163,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	rb_link_node(&block_group->cache_node, parent, p);
 	rb_insert_color(&block_group->cache_node,
 			&info->block_group_cache_tree);
+
+	if (info->first_logical_byte > block_group->key.objectid)
+		info->first_logical_byte = block_group->key.objectid;
+
 	spin_unlock(&info->block_group_cache_lock);
 
 	return 0;
@@ -203,8 +208,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 			break;
 		}
 	}
-	if (ret)
+	if (ret) {
 		btrfs_get_block_group(ret);
+		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+			info->first_logical_byte = ret->key.objectid;
+	}
 	spin_unlock(&info->block_group_cache_lock);
 
 	return ret;
@@ -468,8 +476,6 @@ out:
 }
 
 static int cache_block_group(struct btrfs_block_group_cache *cache,
-			     struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
 			     int load_cache_only)
 {
 	DEFINE_WAIT(wait);
@@ -527,12 +533,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	cache->cached = BTRFS_CACHE_FAST;
 	spin_unlock(&cache->lock);
 
-	/*
-	 * We can't do the read from on-disk cache during a commit since we need
-	 * to have the normal tree locking.  Also if we are currently trying to
-	 * allocate blocks for the tree root we can't do the fast caching since
-	 * we likely hold important locks.
-	 */
 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
 		ret = load_free_space_cache(fs_info, cache);
 
@@ -2143,7 +2143,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 						      node->num_bytes);
 			}
 		}
-		mutex_unlock(&head->mutex);
 		return ret;
 	}
 
@@ -2258,7 +2257,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 			 * process of being added. Don't run this ref yet.
 			 */
 			list_del_init(&locked_ref->cluster);
-			mutex_unlock(&locked_ref->mutex);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
@@ -2285,7 +2284,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 			ref = &locked_ref->node;
 
 			if (extent_op && must_insert_reserved) {
-				kfree(extent_op);
+				btrfs_free_delayed_extent_op(extent_op);
 				extent_op = NULL;
 			}
 
@@ -2294,28 +2293,25 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 				ret = run_delayed_extent_op(trans, root,
 							    ref, extent_op);
-				kfree(extent_op);
+				btrfs_free_delayed_extent_op(extent_op);
 
 				if (ret) {
-					list_del_init(&locked_ref->cluster);
-					mutex_unlock(&locked_ref->mutex);
-
-					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+					printk(KERN_DEBUG
+					       "btrfs: run_delayed_extent_op "
+					       "returned %d\n", ret);
 					spin_lock(&delayed_refs->lock);
+					btrfs_delayed_ref_unlock(locked_ref);
 					return ret;
 				}
 
 				goto next;
 			}
-
-			list_del_init(&locked_ref->cluster);
-			locked_ref = NULL;
 		}
 
 		ref->in_tree = 0;
 		rb_erase(&ref->rb_node, &delayed_refs->root);
 		delayed_refs->num_entries--;
-		if (locked_ref) {
+		if (!btrfs_delayed_ref_is_head(ref)) {
 			/*
 			 * when we play the delayed ref, also correct the
 			 * ref_mod on head
@@ -2337,20 +2333,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
 					  must_insert_reserved);
 
-		btrfs_put_delayed_ref(ref);
-		kfree(extent_op);
-		count++;
-
+		btrfs_free_delayed_extent_op(extent_op);
 		if (ret) {
-			if (locked_ref) {
-				list_del_init(&locked_ref->cluster);
-				mutex_unlock(&locked_ref->mutex);
-			}
-			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+			btrfs_delayed_ref_unlock(locked_ref);
+			btrfs_put_delayed_ref(ref);
+			printk(KERN_DEBUG
+			       "btrfs: run_one_delayed_ref returned %d\n", ret);
 			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
+		/*
+		 * If this node is a head, that means all the refs in this head
+		 * have been dealt with, and we will pick the next head to deal
+		 * with, so we must unlock the head and drop it from the cluster
+		 * list before we release it.
+		 */
+		if (btrfs_delayed_ref_is_head(ref)) {
+			list_del_init(&locked_ref->cluster);
+			btrfs_delayed_ref_unlock(locked_ref);
+			locked_ref = NULL;
+		}
+		btrfs_put_delayed_ref(ref);
+		count++;
 
 next:
 		cond_resched();
 		spin_lock(&delayed_refs->lock);
@@ -2500,6 +2505,7 @@ again:
 
 		ret = run_clustered_refs(trans, root, &cluster);
 		if (ret < 0) {
+			btrfs_release_ref_cluster(&cluster);
 			spin_unlock(&delayed_refs->lock);
 			btrfs_abort_transaction(trans, root, ret);
 			return ret;
@@ -2586,7 +2592,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_extent_op *extent_op;
 	int ret;
 
-	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+	extent_op = btrfs_alloc_delayed_extent_op();
 	if (!extent_op)
 		return -ENOMEM;
 
@@ -2598,7 +2604,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 
 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
 					  num_bytes, extent_op);
 	if (ret)
-		kfree(extent_op);
+		btrfs_free_delayed_extent_op(extent_op);
 	return ret;
 }
 
@@ -3223,12 +3229,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 	u64 extra_flags = chunk_to_extended(flags) &
 				BTRFS_EXTENDED_PROFILE_MASK;
 
+	write_seqlock(&fs_info->profiles_lock);
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits |= extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 		fs_info->avail_metadata_alloc_bits |= extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 		fs_info->avail_system_alloc_bits |= extra_flags;
+	write_sequnlock(&fs_info->profiles_lock);
 }
 
 /*
@@ -3320,12 +3328,18 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-	if (flags & BTRFS_BLOCK_GROUP_DATA)
-		flags |= root->fs_info->avail_data_alloc_bits;
-	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-		flags |= root->fs_info->avail_system_alloc_bits;
-	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-		flags |= root->fs_info->avail_metadata_alloc_bits;
+	unsigned seq;
+
+	do {
+		seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+		if (flags & BTRFS_BLOCK_GROUP_DATA)
+			flags |= root->fs_info->avail_data_alloc_bits;
+		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+			flags |= root->fs_info->avail_system_alloc_bits;
+		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+			flags |= root->fs_info->avail_metadata_alloc_bits;
+	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
 
 	return btrfs_reduce_alloc_profile(root, flags);
 }
@@ -3564,6 +3578,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	int wait_for_alloc = 0;
 	int ret = 0;
 
+	/* Don't re-enter if we're already allocating a chunk */
+	if (trans->allocating_chunk)
+		return -ENOSPC;
+
 	space_info = __find_space_info(extent_root->fs_info, flags);
 	if (!space_info) {
 		ret = update_space_info(extent_root->fs_info, flags,
@@ -3606,6 +3624,8 @@ again:
 		goto again;
 	}
 
+	trans->allocating_chunk = true;
+
 	/*
 	 * If we have mixed data/metadata chunks we want to make sure we keep
 	 * allocating mixed chunks instead of individual chunks.
@@ -3632,6 +3652,7 @@ again:
 	check_system_chunk(trans, extent_root, flags);
 
 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
+	trans->allocating_chunk = false;
 	if (ret < 0 && ret != -ENOSPC)
 		goto out;
 
@@ -3653,13 +3674,31 @@ static int can_overcommit(struct btrfs_root *root,
 			  struct btrfs_space_info *space_info, u64 bytes,
 			  enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	u64 profile = btrfs_get_alloc_profile(root, 0);
+	u64 rsv_size = 0;
 	u64 avail;
 	u64 used;
+	u64 to_add;
 
 	used = space_info->bytes_used + space_info->bytes_reserved +
-		space_info->bytes_pinned + space_info->bytes_readonly +
-		space_info->bytes_may_use;
+		space_info->bytes_pinned + space_info->bytes_readonly;
+
+	spin_lock(&global_rsv->lock);
+	rsv_size = global_rsv->size;
+	spin_unlock(&global_rsv->lock);
+
+	/*
+	 * We only want to allow over committing if we have lots of actual space
+	 * free, but if we don't have enough space to handle the global reserve
+	 * space then we could end up having a real enospc problem when trying
+	 * to allocate a chunk or some other such important allocation.
+	 */
+	rsv_size <<= 1;
+	if (used + rsv_size >= space_info->total_bytes)
+		return 0;
+
+	used += space_info->bytes_may_use;
 
 	spin_lock(&root->fs_info->free_chunk_lock);
 	avail = root->fs_info->free_chunk_space;
@@ -3674,27 +3713,38 @@ static int can_overcommit(struct btrfs_root *root,
 		       BTRFS_BLOCK_GROUP_RAID10))
 		avail >>= 1;
 
+	to_add = space_info->total_bytes;
+
 	/*
 	 * If we aren't flushing all things, let us overcommit up to
 	 * 1/2th of the space. If we can flush, don't let us overcommit
 	 * too much, let it overcommit up to 1/8 of the space.
 	 */
 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
-		avail >>= 3;
+		to_add >>= 3;
 	else
-		avail >>= 1;
+		to_add >>= 1;
 
-	if (used + bytes < space_info->total_bytes + avail)
+	/*
+	 * Limit the overcommit to the amount of free space we could possibly
+	 * allocate for chunks.
+	 */
+	to_add = min(avail, to_add);
+
+	if (used + bytes < space_info->total_bytes + to_add)
 		return 1;
 	return 0;
 }
 
-static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
-					       unsigned long nr_pages,
-					       enum wb_reason reason)
+static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
+						      unsigned long nr_pages,
+						      enum wb_reason reason)
 {
-	if (!writeback_in_progress(sb->s_bdi) &&
-	    down_read_trylock(&sb->s_umount)) {
+	/* the flusher is dealing with the dirty inodes now. */
+	if (writeback_in_progress(sb->s_bdi))
+		return 1;
+
+	if (down_read_trylock(&sb->s_umount)) {
 		writeback_inodes_sb_nr(sb, nr_pages, reason);
 		up_read(&sb->s_umount);
 		return 1;
@@ -3703,6 +3753,28 @@ static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
 	return 0;
 }
 
+void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+				  unsigned long nr_pages)
+{
+	struct super_block *sb = root->fs_info->sb;
+	int started;
+
+	/* If we can not start writeback, just sync all the delalloc file. */
+	started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
+						      WB_REASON_FS_FREE_SPACE);
+	if (!started) {
+		/*
+		 * We needn't worry the filesystem going from r/w to r/o though
+		 * we don't acquire ->s_umount mutex, because the filesystem
+		 * should guarantee the delalloc inodes list be empty after
+		 * the filesystem is readonly(all dirty pages are written to
+		 * the disk).
+		 */
+		btrfs_start_delalloc_inodes(root, 0);
+		btrfs_wait_ordered_extents(root, 0);
+	}
+}
+
 /*
  * shrink metadata reservation for delalloc
  */
@@ -3724,7 +3796,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	space_info = block_rsv->space_info;
 
 	smp_mb();
-	delalloc_bytes = root->fs_info->delalloc_bytes;
+	delalloc_bytes = percpu_counter_sum_positive(
+						&root->fs_info->delalloc_bytes);
 	if (delalloc_bytes == 0) {
 		if (trans)
 			return;
@@ -3735,10 +3808,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
-		writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
-						    nr_pages,
-						    WB_REASON_FS_FREE_SPACE);
-
+		btrfs_writeback_inodes_sb_nr(root, nr_pages);
 		/*
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.
@@ -3766,7 +3836,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 				break;
 		}
 		smp_mb();
-		delalloc_bytes = root->fs_info->delalloc_bytes;
+		delalloc_bytes = percpu_counter_sum_positive(
+						&root->fs_info->delalloc_bytes);
 	}
 }
 
@@ -4030,6 +4101,15 @@ again:
 		goto again;
 
 out:
+	if (ret == -ENOSPC &&
+	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
+		struct btrfs_block_rsv *global_rsv =
+			&root->fs_info->global_block_rsv;
+
+		if (block_rsv != global_rsv &&
+		    !block_rsv_use_bytes(global_rsv, orig_bytes))
+			ret = 0;
+	}
 	if (flushing) {
 		spin_lock(&space_info->lock);
 		space_info->flush = 0;
@@ -4668,7 +4748,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 	spin_lock(&BTRFS_I(inode)->lock);
 	dropped = drop_outstanding_extent(inode);
 
-	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+	if (num_bytes)
+		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped > 0)
 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -4735,8 +4816,7 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
 	btrfs_free_reserved_data_space(inode, num_bytes);
 }
 
-static int update_block_group(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc)
 {
 	struct btrfs_block_group_cache *cache = NULL;
@@ -4773,7 +4853,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 		 * space back to the block group, otherwise we will leak space.
 		 */
 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
-			cache_block_group(cache, trans, NULL, 1);
+			cache_block_group(cache, 1);
 
 		byte_in_group = bytenr - cache->key.objectid;
 		WARN_ON(byte_in_group > cache->key.offset);
@@ -4823,6 +4903,13 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
 	struct btrfs_block_group_cache *cache;
 	u64 bytenr;
 
+	spin_lock(&root->fs_info->block_group_cache_lock);
+	bytenr = root->fs_info->first_logical_byte;
+	spin_unlock(&root->fs_info->block_group_cache_lock);
+
+	if (bytenr < (u64)-1)
+		return bytenr;
+
 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
 	if (!cache)
 		return 0;
@@ -4873,8 +4960,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
 /*
  * this function must be called within transaction
  */
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 				    u64 bytenr, u64 num_bytes)
 {
 	struct btrfs_block_group_cache *cache;
@@ -4888,7 +4974,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
 	 * to one because the slow code to read in the free extents does check
 	 * the pinned extents.
 	 */
-	cache_block_group(cache, trans, root, 1);
+	cache_block_group(cache, 1);
 
 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
 
@@ -5285,7 +5371,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			}
 		}
 
-		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+		ret = update_block_group(root, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, extent_root, ret);
 			goto out;
@@ -5330,7 +5416,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	if (head->extent_op) {
 		if (!head->must_insert_reserved)
 			goto out;
-		kfree(head->extent_op);
+		btrfs_free_delayed_extent_op(head->extent_op);
 		head->extent_op = NULL;
 	}
 
@@ -5476,7 +5562,6 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 				u64 num_bytes)
 {
 	struct btrfs_caching_control *caching_ctl;
-	DEFINE_WAIT(wait);
 
 	caching_ctl = get_caching_control(cache);
 	if (!caching_ctl)
@@ -5493,7 +5578,6 @@ static noinline int
 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_caching_control *caching_ctl;
-	DEFINE_WAIT(wait);
 
 	caching_ctl = get_caching_control(cache);
 	if (!caching_ctl)
@@ -5507,20 +5591,16 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 
 int __get_raid_index(u64 flags)
 {
-	int index;
-
 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
-		index = 0;
+		return BTRFS_RAID_RAID10;
 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
-		index = 1;
+		return BTRFS_RAID_RAID1;
 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
-		index = 2;
+		return BTRFS_RAID_DUP;
 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
-		index = 3;
+		return BTRFS_RAID_RAID0;
 	else
-		index = 4;
-
-	return index;
+		return BTRFS_RAID_SINGLE;
 }
 
 static int get_block_group_index(struct btrfs_block_group_cache *cache)
@@ -5678,8 +5758,7 @@ have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
 			found_uncached_bg = true;
-			ret = cache_block_group(block_group, trans,
-						orig_root, 0);
+			ret = cache_block_group(block_group, 0);
 			BUG_ON(ret < 0);
 			ret = 0;
 		}
@@ -6108,7 +6187,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
-	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		printk(KERN_ERR "btrfs update block group failed for %llu "
 		       "%llu\n", (unsigned long long)ins->objectid,
@@ -6172,7 +6251,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
-	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		printk(KERN_ERR "btrfs update block group failed for %llu "
 		       "%llu\n", (unsigned long long)ins->objectid,
@@ -6215,7 +6294,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	u64 num_bytes = ins->offset;
 
 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	cache_block_group(block_group, trans, NULL, 0);
+	cache_block_group(block_group, 0);
 
 	caching_ctl = get_caching_control(block_group);
 	if (!caching_ctl) {
@@ -6329,12 +6408,14 @@ use_block_rsv(struct btrfs_trans_handle *trans,
 	if (!ret)
 		return block_rsv;
 	if (ret && !block_rsv->failfast) {
-		static DEFINE_RATELIMIT_STATE(_rs,
-				DEFAULT_RATELIMIT_INTERVAL,
-				/*DEFAULT_RATELIMIT_BURST*/ 2);
-		if (__ratelimit(&_rs))
-			WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
-			     ret);
+		if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+			static DEFINE_RATELIMIT_STATE(_rs,
+					DEFAULT_RATELIMIT_INTERVAL * 10,
+					/*DEFAULT_RATELIMIT_BURST*/ 1);
+			if (__ratelimit(&_rs))
+				WARN(1, KERN_DEBUG
+					"btrfs: block rsv returned %d\n", ret);
+		}
 		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
 					     BTRFS_RESERVE_NO_FLUSH);
 		if (!ret) {
@@ -6400,7 +6481,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 
 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
 		struct btrfs_delayed_extent_op *extent_op;
-		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+		extent_op = btrfs_alloc_delayed_extent_op();
 		BUG_ON(!extent_op); /* -ENOMEM */
 		if (key)
 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
@@ -7481,16 +7562,16 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		index = get_block_group_index(block_group);
 	}
 
-	if (index == 0) {
+	if (index == BTRFS_RAID_RAID10) {
 		dev_min = 4;
 		/* Divide by 2 */
 		min_free >>= 1;
-	} else if (index == 1) {
+	} else if (index == BTRFS_RAID_RAID1) {
 		dev_min = 2;
-	} else if (index == 2) {
+	} else if (index == BTRFS_RAID_DUP) {
 		/* Multiply by 2 */
 		min_free <<= 1;
-	} else if (index == 3) {
+	} else if (index == BTRFS_RAID_RAID0) {
 		dev_min = fs_devices->rw_devices;
 		do_div(min_free, dev_min);
 	}
@@ -7651,11 +7732,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
-		if (space_info->bytes_pinned > 0 ||
-		    space_info->bytes_reserved > 0 ||
-		    space_info->bytes_may_use > 0) {
-			WARN_ON(1);
-			dump_space_info(space_info, 0, 0);
+		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
+			if (space_info->bytes_pinned > 0 ||
+			    space_info->bytes_reserved > 0 ||
+			    space_info->bytes_may_use > 0) {
+				WARN_ON(1);
+				dump_space_info(space_info, 0, 0);
+			}
 		}
 		list_del(&space_info->list);
 		kfree(space_info);
@@ -7932,12 +8015,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 	u64 extra_flags = chunk_to_extended(flags) &
 				BTRFS_EXTENDED_PROFILE_MASK;
 
+	write_seqlock(&fs_info->profiles_lock);
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits &= ~extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 		fs_info->avail_system_alloc_bits &= ~extra_flags;
+	write_sequnlock(&fs_info->profiles_lock);
 }
 
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
@@ -8036,6 +8121,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	spin_lock(&root->fs_info->block_group_cache_lock);
 	rb_erase(&block_group->cache_node,
 		 &root->fs_info->block_group_cache_tree);
+
+	if (root->fs_info->first_logical_byte == block_group->key.objectid)
+		root->fs_info->first_logical_byte = (u64)-1;
 	spin_unlock(&root->fs_info->block_group_cache_lock);
 
 	down_write(&block_group->space_info->groups_sem);
@@ -8158,7 +8246,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
-				ret = cache_block_group(cache, NULL, root, 0);
+				ret = cache_block_group(cache, 0);
 				if (!ret)
 					wait_block_group_cache_done(cache);
 			}
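
The profiles_lock hunks above convert readers of fs_info->avail_{data,metadata,system}_alloc_bits to a seqlock retry loop, so they take no lock yet never observe a half-updated set of profile bits while set_avail_alloc_bits()/clear_avail_alloc_bits() run. Below is a minimal, out-of-tree sketch of that pattern, not the btrfs code itself: only seqlock_t, write_seqlock()/write_sequnlock() and read_seqbegin()/read_seqretry() are real kernel API, while the struct, fields and functions are invented for illustration.

#include <linux/seqlock.h>
#include <linux/types.h>

/* Illustrative stand-in for the avail_*_alloc_bits words in btrfs_fs_info.
 * The lock is assumed to be set up once with seqlock_init() at init time. */
struct profile_bits {
	seqlock_t lock;		/* plays the role of fs_info->profiles_lock */
	u64 avail_data;
	u64 avail_metadata;
	u64 avail_system;
};

/* Writer side: publish the update while holding the seqlock. */
static void profile_bits_set_data(struct profile_bits *p, u64 extra_flags)
{
	write_seqlock(&p->lock);
	p->avail_data |= extra_flags;
	write_sequnlock(&p->lock);
}

/* Reader side: no lock taken; retry if a writer interleaved with us. */
static u64 profile_bits_get(struct profile_bits *p, u64 flags)
{
	unsigned int seq;
	u64 ret;

	do {
		seq = read_seqbegin(&p->lock);
		if (flags & 0x1)	/* stands in for BTRFS_BLOCK_GROUP_DATA */
			ret = flags | p->avail_data;
		else
			ret = flags | p->avail_metadata;
	} while (read_seqretry(&p->lock, seq));

	return ret;
}

Writers are rare (only chunk allocation and removal touch these bits), so the retry loop costs readers almost nothing while removing a lock from the allocation fast path.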
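The can_overcommit() rework is mostly arithmetic, so the heuristic may be easier to read as a standalone function. The userspace-style sketch below mirrors the new logic under simplifying assumptions: the struct, field and function names are invented, and the halving of free device space for DUP/RAID1/RAID10 profiles is omitted. Only the shape follows the patch: refuse to overcommit once used space plus twice the global reserve reaches the space already allocated to chunks, otherwise allow an overcommit window of 1/8 of that space when a full flush is permitted (1/2 otherwise), capped by the space still unallocated on the devices.

#include <stdbool.h>
#include <stdint.h>

/* Invented container for the few numbers the check needs. */
struct space_sketch {
	uint64_t total_bytes;		/* space already allocated to chunks */
	uint64_t used;			/* bytes_used + reserved + pinned + readonly */
	uint64_t bytes_may_use;		/* outstanding reservations */
	uint64_t global_rsv_size;	/* size of the global block reserve */
	uint64_t free_chunk_space;	/* unallocated space left on the devices */
};

static bool can_overcommit_sketch(const struct space_sketch *s,
				  uint64_t bytes, bool flush_all)
{
	uint64_t used = s->used;
	uint64_t rsv = s->global_rsv_size << 1;	/* keep room for 2x the global reserve */
	uint64_t to_add = s->total_bytes;

	/* never overcommit into space the global reserve may need */
	if (used + rsv >= s->total_bytes)
		return false;

	used += s->bytes_may_use;

	/* 1/8 of the allocated space if we may flush everything, else 1/2 */
	to_add >>= flush_all ? 3 : 1;

	/* and never more than what is still unallocated on the devices */
	if (to_add > s->free_chunk_space)
		to_add = s->free_chunk_space;

	return used + bytes < s->total_bytes + to_add;
}

Reserving a margin of twice the global reserve is the point the added comment in the hunk argues: overcommitting into that space risks a hard ENOSPC on the critical allocations the reserve exists to cover.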