Diffstat (limited to 'fs/btrfs/free-space-cache.c')
| -rw-r--r-- | fs/btrfs/free-space-cache.c | 82 |
1 file changed, 63 insertions, 19 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 59ea2e4349c..1f84fc09c1a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
 	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
+	max_bitmaps = max(max_bitmaps, 1);
+
 	BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
 	/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }
 
 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
 	int ret;
 
 	if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 		if (entry->bytes < *bytes)
 			continue;
 
+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}
 
-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	}
 
 	/*
-	 * some block groups are so tiny they can't be enveloped by a bitmap, so
-	 * don't even bother to create a bitmap for this
+	 * The original block groups from mkfs can be really small, like 8
+	 * megabytes, so don't bother with a bitmap for those entries.
+	 * However, some block groups can be smaller than what a bitmap would
+	 * cover but are still large enough that they could overflow the 32k
+	 * memory limit, so still allow those block groups to have a bitmap
+	 * entry.
 	 */
-	if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
 		return false;
 
 	return true;
@@ -1862,11 +1891,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
-	int ret = 0;
+	int ret;
+	bool re_search = false;
 
 	spin_lock(&ctl->tree_lock);
 
 again:
+	ret = 0;
 	if (!bytes)
 		goto out_lock;
@@ -1879,17 +1910,17 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			/* the tree logging code might be calling us before we
-			 * have fully loaded the free space rbtree for this
-			 * block group.  So it is possible the entry won't
-			 * be in the rbtree yet at all.  The caching code
-			 * will make sure not to put it in the rbtree if
-			 * the logging code has pinned it.
+			/*
+			 * If we found a partial bit of our free space in a
+			 * bitmap but then couldn't find the other part, this
+			 * may be a problem, so WARN about it.
 			 */
+			WARN_ON(re_search);
 			goto out_lock;
 		}
 	}
 
+	re_search = false;
 	if (!info->bitmap) {
 		unlink_free_space(ctl, info);
 		if (offset == info->offset) {
@@ -1935,8 +1966,10 @@ again:
 	}
 
 	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-	if (ret == -EAGAIN)
+	if (ret == -EAGAIN) {
+		re_search = true;
 		goto again;
+	}
 	BUG_ON(ret); /* logic error */
 out_lock:
 	spin_unlock(&ctl->tree_lock);
@@ -2091,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;
 
 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;
@@ -2103,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
+
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
@@ -2115,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);
 
+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }
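
The round-up arithmetic in the new find_free_space() is easy to misread, so here is a minimal userspace sketch of the same calculation. The kernel's in-place do_div() is replaced by plain 64-bit division, and the numbers in main() are hypothetical, not values taken from this diff.

/*
 * Sketch of the round-up performed by the new find_free_space(),
 * assuming do_div(tmp, align) behaves like tmp /= align for u64.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t align_free_space_offset(uint64_t ctl_start,
					uint64_t entry_offset,
					uint64_t align)
{
	/* offset of the entry relative to the start of the space ctl */
	uint64_t ctl_off = entry_offset - ctl_start;
	/* round that relative offset up to the next multiple of align */
	uint64_t tmp = (ctl_off + align - 1) / align;

	/* translate back into an absolute byte offset */
	return tmp * align + ctl_start;
}

int main(void)
{
	/* e.g. a 64 KiB full stripe and a free extent that starts
	 * 10000 bytes into the block group */
	uint64_t aligned = align_free_space_offset(0, 10000, 65536);

	/* prints 65536; the 10000..65535 span is the align_gap that
	 * btrfs_find_space_for_alloc() hands back to the free space tree */
	printf("aligned offset: %llu\n", (unsigned long long)aligned);
	return 0;
}

If rounding the offset up pushes the request past the end of the entry, the new `entry->bytes < *bytes + align_off` check simply skips that entry and keeps searching.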
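For scale on the relaxed use_bitmap() cutoff, the arithmetic below assumes 4 KiB pages (so BITS_PER_BITMAP = PAGE_SIZE * 8 = 32768) and a 4 KiB ctl->unit; both are assumptions rather than values shown in this diff.

/*
 * Back-of-the-envelope check of the relaxed use_bitmap() cutoff,
 * assuming 4 KiB pages and a 4 KiB ctl->unit.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bits_per_bitmap = 4096 * 8;	/* one page worth of bits */
	uint64_t unit = 4096;			/* bytes tracked per bit */
	uint64_t coverage = bits_per_bitmap * unit;

	/* a single bitmap covers 128 MiB of block group space */
	printf("bitmap coverage: %llu MiB\n",
	       (unsigned long long)(coverage >> 20));

	/* the old test rejected bitmaps below full coverage; the new
	 * (coverage >> 1) test only rejects block groups below half */
	printf("old cutoff: %llu MiB, new cutoff: %llu MiB\n",
	       (unsigned long long)(coverage >> 20),
	       (unsigned long long)(coverage >> 21));
	return 0;
}

Under those assumptions, block groups between 64 MiB and 128 MiB, which previously had to track everything with extent entries, may now use a bitmap entry instead of risking the 32k memory limit.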