diff options
Diffstat (limited to 'fs/ext4/mballoc.c')
| -rw-r--r-- | fs/ext4/mballoc.c | 77 | 
1 file changed, 26 insertions(+), 51 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 1bf6fe785c4..7bb713a46fe 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -23,11 +23,18 @@  #include "ext4_jbd2.h"  #include "mballoc.h" -#include <linux/debugfs.h>  #include <linux/log2.h> +#include <linux/module.h>  #include <linux/slab.h>  #include <trace/events/ext4.h> +#ifdef CONFIG_EXT4_DEBUG +ushort ext4_mballoc_debug __read_mostly; + +module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644); +MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); +#endif +  /*   * MUSTDO:   *   - test ext4_ext_search_left() and ext4_ext_search_right() @@ -1884,15 +1891,19 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,  	case 0:  		BUG_ON(ac->ac_2order == 0); -		if (grp->bb_largest_free_order < ac->ac_2order) -			return 0; -  		/* Avoid using the first bg of a flexgroup for data files */  		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&  		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&  		    ((group % flex_size) == 0))  			return 0; +		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || +		    (free / fragments) >= ac->ac_g_ex.fe_len) +			return 1; + +		if (grp->bb_largest_free_order < ac->ac_2order) +			return 0; +  		return 1;  	case 1:  		if ((free / fragments) >= ac->ac_g_ex.fe_len) @@ -2007,7 +2018,7 @@ repeat:  			}  			ac->ac_groups_scanned++; -			if (cr == 0) +			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)  				ext4_mb_simple_scan_group(ac, &e4b);  			else if (cr == 1 && sbi->s_stripe &&  					!(ac->ac_g_ex.fe_len % sbi->s_stripe)) @@ -2656,40 +2667,6 @@ static void ext4_free_data_callback(struct super_block *sb,  	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);  } -#ifdef CONFIG_EXT4_DEBUG -u8 mb_enable_debug __read_mostly; - -static struct dentry *debugfs_dir; -static struct dentry *debugfs_debug; - -static void __init ext4_create_debugfs_entry(void) -{ -	debugfs_dir = debugfs_create_dir("ext4", NULL); -	if 
(debugfs_dir) -		debugfs_debug = debugfs_create_u8("mballoc-debug", -						  S_IRUGO | S_IWUSR, -						  debugfs_dir, -						  &mb_enable_debug); -} - -static void ext4_remove_debugfs_entry(void) -{ -	debugfs_remove(debugfs_debug); -	debugfs_remove(debugfs_dir); -} - -#else - -static void __init ext4_create_debugfs_entry(void) -{ -} - -static void ext4_remove_debugfs_entry(void) -{ -} - -#endif -  int __init ext4_init_mballoc(void)  {  	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, @@ -2711,7 +2688,6 @@ int __init ext4_init_mballoc(void)  		kmem_cache_destroy(ext4_ac_cachep);  		return -ENOMEM;  	} -	ext4_create_debugfs_entry();  	return 0;  } @@ -2726,7 +2702,6 @@ void ext4_exit_mballoc(void)  	kmem_cache_destroy(ext4_ac_cachep);  	kmem_cache_destroy(ext4_free_data_cachep);  	ext4_groupinfo_destroy_slabs(); -	ext4_remove_debugfs_entry();  } @@ -3444,7 +3419,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)  			win = offs;  		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - -			EXT4_B2C(sbi, win); +			EXT4_NUM_B2C(sbi, win);  		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);  		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);  	} @@ -3872,7 +3847,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)  	struct super_block *sb = ac->ac_sb;  	ext4_group_t ngroups, i; -	if (!mb_enable_debug || +	if (!ext4_mballoc_debug ||  	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))  		return; @@ -4005,8 +3980,8 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,  	len = ar->len;  	/* just a dirty hack to filter too big requests  */ -	if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10) -		len = EXT4_CLUSTERS_PER_GROUP(sb) - 10; +	if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) +		len = EXT4_CLUSTERS_PER_GROUP(sb);  	/* start searching from the goal */  	goal = ar->goal; @@ -4136,7 +4111,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)  		/* The max size of hash table is PREALLOC_TB_SIZE */  		order = 
PREALLOC_TB_SIZE - 1;  	/* Add the prealloc space to lg */ -	rcu_read_lock(); +	spin_lock(&lg->lg_prealloc_lock);  	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],  						pa_inode_list) {  		spin_lock(&tmp_pa->pa_lock); @@ -4160,12 +4135,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)  	if (!added)  		list_add_tail_rcu(&pa->pa_inode_list,  					&lg->lg_prealloc_list[order]); -	rcu_read_unlock(); +	spin_unlock(&lg->lg_prealloc_lock);  	/* Now trim the list to be not more than 8 elements */  	if (lg_prealloc_count > 8) {  		ext4_mb_discard_lg_preallocations(sb, lg, -						order, lg_prealloc_count); +						  order, lg_prealloc_count);  		return;  	}  	return ; @@ -4590,7 +4565,7 @@ do_more:  			EXT4_BLOCKS_PER_GROUP(sb);  		count -= overflow;  	} -	count_clusters = EXT4_B2C(sbi, count); +	count_clusters = EXT4_NUM_B2C(sbi, count);  	bitmap_bh = ext4_read_block_bitmap(sb, block_group);  	if (!bitmap_bh) {  		err = -EIO; @@ -4832,11 +4807,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,  	ext4_group_desc_csum_set(sb, block_group, desc);  	ext4_unlock_group(sb, block_group);  	percpu_counter_add(&sbi->s_freeclusters_counter, -			   EXT4_B2C(sbi, blocks_freed)); +			   EXT4_NUM_B2C(sbi, blocks_freed));  	if (sbi->s_log_groups_per_flex) {  		ext4_group_t flex_group = ext4_flex_group(sbi, block_group); -		atomic_add(EXT4_B2C(sbi, blocks_freed), +		atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),  			   &sbi->s_flex_groups[flex_group].free_clusters);  	}  |