Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/btrfs_inode.h       |   4
-rw-r--r--  fs/btrfs/delayed-inode.c     |  58
-rw-r--r--  fs/btrfs/disk-io.c           |  42
-rw-r--r--  fs/btrfs/extent-tree.c       |  50
-rw-r--r--  fs/btrfs/free-space-cache.c  |  17
-rw-r--r--  fs/btrfs/inode-map.c         |  28
-rw-r--r--  fs/btrfs/inode.c             |  84
-rw-r--r--  fs/btrfs/relocation.c        |   2
-rw-r--r--  fs/btrfs/scrub.c             |  64
-rw-r--r--  fs/btrfs/super.c             |  49
-rw-r--r--  fs/btrfs/transaction.c       |   4
-rw-r--r--  fs/btrfs/volumes.c           |   5
-rw-r--r--  fs/hfs/trans.c               |   2
-rw-r--r--  fs/proc/base.c               | 142
-rw-r--r--  fs/xfs/xfs_aops.c            |   2
-rw-r--r--  fs/xfs/xfs_buf_item.c        |   2
-rw-r--r--  fs/xfs/xfs_dquot_item.c      |   6
-rw-r--r--  fs/xfs/xfs_extfree_item.c    |   4
-rw-r--r--  fs/xfs/xfs_inode_item.c      |   2
-rw-r--r--  fs/xfs/xfs_log.c             |   2
-rw-r--r--  fs/xfs/xfs_log.h             |   2
-rw-r--r--  fs/xfs/xfs_trans.h           |   6
-rw-r--r--  fs/xfs/xfs_vnodeops.c        |  14
23 files changed, 328 insertions(+), 263 deletions(-)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 5a5d325a393..634608d2a6d 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -147,14 +147,12 @@ struct btrfs_inode {  	 * the btrfs file release call will add this inode to the  	 * ordered operations list so that we make sure to flush out any  	 * new data the application may have written before commit. -	 * -	 * yes, its silly to have a single bitflag, but we might grow more -	 * of these.  	 */  	unsigned ordered_data_close:1;  	unsigned orphan_meta_reserved:1;  	unsigned dummy_inode:1;  	unsigned in_defrag:1; +	unsigned delalloc_meta_reserved:1;  	/*  	 * always compress this one file diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 3a1b939c9ae..5b163572e0c 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,  static int btrfs_delayed_inode_reserve_metadata(  					struct btrfs_trans_handle *trans,  					struct btrfs_root *root, +					struct inode *inode,  					struct btrfs_delayed_node *node)  {  	struct btrfs_block_rsv *src_rsv;  	struct btrfs_block_rsv *dst_rsv;  	u64 num_bytes;  	int ret; +	int release = false;  	src_rsv = trans->block_rsv;  	dst_rsv = &root->fs_info->delayed_block_rsv; @@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(  		if (!ret)  			node->bytes_reserved = num_bytes;  		return ret; +	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) { +		spin_lock(&BTRFS_I(inode)->lock); +		if (BTRFS_I(inode)->delalloc_meta_reserved) { +			BTRFS_I(inode)->delalloc_meta_reserved = 0; +			spin_unlock(&BTRFS_I(inode)->lock); +			release = true; +			goto migrate; +		} +		spin_unlock(&BTRFS_I(inode)->lock); + +		/* Ok we didn't have space pre-reserved.  This shouldn't happen +		 * too often but it can happen if we do delalloc to an existing +		 * inode which gets dirtied because of the time update, and then +		 * isn't touched again until after the transaction commits and +		 * then we try to write out the data.  First try to be nice and +		 * reserve something strictly for us.  If not be a pain and try +		 * to steal from the delalloc block rsv. +		 */ +		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes); +		if (!ret) +			goto out; + +		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); +		if (!ret) +			goto out; + +		/* +		 * Ok this is a problem, let's just steal from the global rsv +		 * since this really shouldn't happen that often. +		 */ +		WARN_ON(1); +		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv, +					      dst_rsv, num_bytes); +		goto out;  	} +migrate:  	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + +out: +	/* +	 * Migrate only takes a reservation, it doesn't touch the size of the +	 * block_rsv.  This is to simplify people who don't normally have things +	 * migrated from their block rsv.  If they go to release their +	 * reservation, that will decrease the size as well, so if migrate +	 * reduced size we'd end up with a negative size.  But for the +	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation, +	 * but we could in fact do this reserve/migrate dance several times +	 * between the time we did the original reservation and we'd clean it +	 * up.  So to take care of this, release the space for the meta +	 * reservation here.  I think it may be time for a documentation page on +	 * how block rsvs. work. 
+	 */  	if (!ret)  		node->bytes_reserved = num_bytes; +	if (release) +		btrfs_block_rsv_release(root, src_rsv, num_bytes); +  	return ret;  } @@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,  		goto release_node;  	} -	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); +	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode, +						   delayed_node);  	if (ret)  		goto release_node; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 102c176fc29..62afe5c5694 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	u64 features;  	struct btrfs_key location;  	struct buffer_head *bh; -	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), -						 GFP_NOFS); -	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), -						 GFP_NOFS); +	struct btrfs_super_block *disk_super;  	struct btrfs_root *tree_root = btrfs_sb(sb); -	struct btrfs_fs_info *fs_info = NULL; -	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), -						GFP_NOFS); -	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), -					      GFP_NOFS); +	struct btrfs_fs_info *fs_info = tree_root->fs_info; +	struct btrfs_root *extent_root; +	struct btrfs_root *csum_root; +	struct btrfs_root *chunk_root; +	struct btrfs_root *dev_root;  	struct btrfs_root *log_tree_root; -  	int ret;  	int err = -EINVAL;  	int num_backups_tried = 0;  	int backup_index = 0; -	struct btrfs_super_block *disk_super; +	extent_root = fs_info->extent_root = +		kzalloc(sizeof(struct btrfs_root), GFP_NOFS); +	csum_root = fs_info->csum_root = +		kzalloc(sizeof(struct btrfs_root), GFP_NOFS); +	chunk_root = fs_info->chunk_root = +		kzalloc(sizeof(struct btrfs_root), GFP_NOFS); +	dev_root = fs_info->dev_root = +		kzalloc(sizeof(struct btrfs_root), GFP_NOFS); -	if (!extent_root || !tree_root || !tree_root->fs_info || -	    !chunk_root || !dev_root || !csum_root) { +	if (!extent_root || !csum_root || !chunk_root || !dev_root) {  		err = -ENOMEM;  		goto fail;  	} -	fs_info = tree_root->fs_info;  	ret = init_srcu_struct(&fs_info->subvol_srcu);  	if (ret) { @@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,  	mutex_init(&fs_info->reloc_mutex);  	init_completion(&fs_info->kobj_unregister); -	fs_info->tree_root = tree_root; -	fs_info->extent_root = extent_root; -	fs_info->csum_root = csum_root; -	fs_info->chunk_root = chunk_root; -	fs_info->dev_root = dev_root; -	fs_info->fs_devices = fs_devices;  	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);  	INIT_LIST_HEAD(&fs_info->space_info);  	btrfs_mapping_init(&fs_info->mapping_tree); @@ -2465,21 +2460,20 @@ fail_sb_buffer:  	btrfs_stop_workers(&fs_info->caching_workers);  fail_alloc:  fail_iput: +	btrfs_mapping_tree_free(&fs_info->mapping_tree); +  	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);  	iput(fs_info->btree_inode); - -	btrfs_close_devices(fs_info->fs_devices); -	btrfs_mapping_tree_free(&fs_info->mapping_tree);  fail_bdi:  	bdi_destroy(&fs_info->bdi);  fail_srcu:  	cleanup_srcu_struct(&fs_info->subvol_srcu);  fail: +	btrfs_close_devices(fs_info->fs_devices);  	free_fs_info(fs_info);  	return ERR_PTR(err);  recovery_tree_root: -  	if (!btrfs_test_opt(tree_root, RECOVERY))  		goto fail_tree_roots; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9879bd47463..b232150b5b6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3797,16 +3797,16 @@ void 
btrfs_free_block_rsv(struct btrfs_root *root,  	kfree(rsv);  } -int btrfs_block_rsv_add(struct btrfs_root *root, -			struct btrfs_block_rsv *block_rsv, -			u64 num_bytes) +static inline int __block_rsv_add(struct btrfs_root *root, +				  struct btrfs_block_rsv *block_rsv, +				  u64 num_bytes, int flush)  {  	int ret;  	if (num_bytes == 0)  		return 0; -	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); +	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);  	if (!ret) {  		block_rsv_add_bytes(block_rsv, num_bytes, 1);  		return 0; @@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,  	return ret;  } +int btrfs_block_rsv_add(struct btrfs_root *root, +			struct btrfs_block_rsv *block_rsv, +			u64 num_bytes) +{ +	return __block_rsv_add(root, block_rsv, num_bytes, 1); +} +  int btrfs_block_rsv_add_noflush(struct btrfs_root *root,  				struct btrfs_block_rsv *block_rsv,  				u64 num_bytes)  { -	int ret; - -	if (num_bytes == 0) -		return 0; - -	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0); -	if (!ret) { -		block_rsv_add_bytes(block_rsv, num_bytes, 1); -		return 0; -	} - -	return ret; +	return __block_rsv_add(root, block_rsv, num_bytes, 0);  }  int btrfs_block_rsv_check(struct btrfs_root *root, @@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,   */  static unsigned drop_outstanding_extent(struct inode *inode)  { +	unsigned drop_inode_space = 0;  	unsigned dropped_extents = 0;  	BUG_ON(!BTRFS_I(inode)->outstanding_extents);  	BTRFS_I(inode)->outstanding_extents--; +	if (BTRFS_I(inode)->outstanding_extents == 0 && +	    BTRFS_I(inode)->delalloc_meta_reserved) { +		drop_inode_space = 1; +		BTRFS_I(inode)->delalloc_meta_reserved = 0; +	} +  	/*  	 * If we have more or the same amount of outsanding extents than we have  	 * reserved then we need to leave the reserved extents count alone.  	 */  	if (BTRFS_I(inode)->outstanding_extents >=  	    BTRFS_I(inode)->reserved_extents) -		return 0; +		return drop_inode_space;  	dropped_extents = BTRFS_I(inode)->reserved_extents -  		BTRFS_I(inode)->outstanding_extents;  	BTRFS_I(inode)->reserved_extents -= dropped_extents; -	return dropped_extents; +	return dropped_extents + drop_inode_space;  }  /** @@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)  		nr_extents = BTRFS_I(inode)->outstanding_extents -  			BTRFS_I(inode)->reserved_extents;  		BTRFS_I(inode)->reserved_extents += nr_extents; +	} -		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); +	/* +	 * Add an item to reserve for updating the inode when we complete the +	 * delalloc io. 
+	 */ +	if (!BTRFS_I(inode)->delalloc_meta_reserved) { +		nr_extents++; +		BTRFS_I(inode)->delalloc_meta_reserved = 1;  	} + +	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);  	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);  	spin_unlock(&BTRFS_I(inode)->lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 7a15fcfb3e1..181760f9d2a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,  			    struct btrfs_free_space *entry, u8 *type)  {  	struct btrfs_free_space_entry *e; +	int ret; + +	if (!io_ctl->cur) { +		ret = io_ctl_check_crc(io_ctl, io_ctl->index); +		if (ret) +			return ret; +	}  	e = io_ctl->cur;  	entry->offset = le64_to_cpu(e->offset); @@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,  	io_ctl_unmap_page(io_ctl); -	if (io_ctl->index >= io_ctl->num_pages) -		return 0; - -	return io_ctl_check_crc(io_ctl, io_ctl->index); +	return 0;  }  static int io_ctl_read_bitmap(struct io_ctl *io_ctl, @@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,  {  	int ret; -	if (io_ctl->cur && io_ctl->cur != io_ctl->orig) -		io_ctl_unmap_page(io_ctl); -  	ret = io_ctl_check_crc(io_ctl, io_ctl->index);  	if (ret)  		return ret; @@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,  		num_entries--;  	} +	io_ctl_unmap_page(&io_ctl); +  	/*  	 * We add the bitmaps at the end of the entries in order that  	 * the bitmap entries are added to the cache. diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 53dcbdf446c..f8962a957d6 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,  	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;  	struct btrfs_path *path;  	struct inode *inode; +	struct btrfs_block_rsv *rsv; +	u64 num_bytes;  	u64 alloc_hint = 0;  	int ret;  	int prealloc; @@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,  	if (!path)  		return -ENOMEM; +	rsv = trans->block_rsv; +	trans->block_rsv = &root->fs_info->trans_block_rsv; + +	num_bytes = trans->bytes_reserved; +	/* +	 * 1 item for inode item insertion if need +	 * 3 items for inode item update (in the worst case) +	 * 1 item for free space object +	 * 3 items for pre-allocation +	 */ +	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8); +	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv, +					  trans->bytes_reserved); +	if (ret) +		goto out;  again:  	inode = lookup_free_ino_inode(root, path);  	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {  		ret = PTR_ERR(inode); -		goto out; +		goto out_release;  	}  	if (IS_ERR(inode)) { @@ -434,7 +451,7 @@ again:  		ret = create_free_ino_inode(root, trans, path);  		if (ret) -			goto out; +			goto out_release;  		goto again;  	} @@ -477,11 +494,14 @@ again:  	}  	btrfs_free_reserved_data_space(inode, prealloc); +	ret = btrfs_write_out_ino_cache(root, trans, path);  out_put:  	iput(inode); +out_release: +	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);  out: -	if (ret == 0) -		ret = btrfs_write_out_ino_cache(root, trans, path); +	trans->block_rsv = rsv; +	trans->bytes_reserved = num_bytes;  	btrfs_free_path(path);  	return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 966ddcc4c63..116ab67a06d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -93,6 +93,8 @@ static noinline int 
cow_file_range(struct inode *inode,  				   struct page *locked_page,  				   u64 start, u64 end, int *page_started,  				   unsigned long *nr_written, int unlock); +static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, +				struct btrfs_root *root, struct inode *inode);  static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,  				     struct inode *inode,  struct inode *dir, @@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  				trans = btrfs_join_transaction(root);  			BUG_ON(IS_ERR(trans));  			trans->block_rsv = &root->fs_info->delalloc_block_rsv; -			ret = btrfs_update_inode(trans, root, inode); +			ret = btrfs_update_inode_fallback(trans, root, inode);  			BUG_ON(ret);  		}  		goto out; @@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)  	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);  	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { -		ret = btrfs_update_inode(trans, root, inode); +		ret = btrfs_update_inode_fallback(trans, root, inode);  		BUG_ON(ret);  	}  	ret = 0; @@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)  		if (ret)  			goto out;  	} +	/* release the path since we're done with it */ +	btrfs_release_path(path); +  	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;  	if (root->orphan_block_rsv) @@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,  /*   * copy everything in the in-memory inode into the btree.   */ -noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, +static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,  				struct btrfs_root *root, struct inode *inode)  {  	struct btrfs_inode_item *inode_item; @@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,  	struct extent_buffer *leaf;  	int ret; -	/* -	 * If the inode is a free space inode, we can deadlock during commit -	 * if we put it into the delayed code. -	 * -	 * The data relocation inode should also be directly updated -	 * without delay -	 */ -	if (!btrfs_is_free_space_inode(root, inode) -	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { -		ret = btrfs_delayed_update_inode(trans, root, inode); -		if (!ret) -			btrfs_set_inode_last_trans(trans, inode); -		return ret; -	} -  	path = btrfs_alloc_path();  	if (!path)  		return -ENOMEM; @@ -2477,6 +2467,43 @@ failed:  }  /* + * copy everything in the in-memory inode into the btree. + */ +noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, +				struct btrfs_root *root, struct inode *inode) +{ +	int ret; + +	/* +	 * If the inode is a free space inode, we can deadlock during commit +	 * if we put it into the delayed code. 
+	 * +	 * The data relocation inode should also be directly updated +	 * without delay +	 */ +	if (!btrfs_is_free_space_inode(root, inode) +	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { +		ret = btrfs_delayed_update_inode(trans, root, inode); +		if (!ret) +			btrfs_set_inode_last_trans(trans, inode); +		return ret; +	} + +	return btrfs_update_inode_item(trans, root, inode); +} + +static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, +				struct btrfs_root *root, struct inode *inode) +{ +	int ret; + +	ret = btrfs_update_inode(trans, root, inode); +	if (ret == -ENOSPC) +		return btrfs_update_inode_item(trans, root, inode); +	return ret; +} + +/*   * unlink helper that gets used here in inode.c and in the tree logging   * recovery code.  It remove a link in a directory with a given name, and   * also drops the back refs in the inode to the directory @@ -5632,7 +5659,7 @@ again:  	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {  		ret = btrfs_ordered_update_i_size(inode, 0, ordered);  		if (!ret) -			err = btrfs_update_inode(trans, root, inode); +			err = btrfs_update_inode_fallback(trans, root, inode);  		goto out;  	} @@ -5670,7 +5697,7 @@ again:  	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);  	ret = btrfs_ordered_update_i_size(inode, 0, ordered);  	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) -		btrfs_update_inode(trans, root, inode); +		btrfs_update_inode_fallback(trans, root, inode);  	ret = 0;  out_unlock:  	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, @@ -6529,14 +6556,16 @@ end_trans:  		ret = btrfs_orphan_del(NULL, inode);  	} -	trans->block_rsv = &root->fs_info->trans_block_rsv; -	ret = btrfs_update_inode(trans, root, inode); -	if (ret && !err) -		err = ret; +	if (trans) { +		trans->block_rsv = &root->fs_info->trans_block_rsv; +		ret = btrfs_update_inode(trans, root, inode); +		if (ret && !err) +			err = ret; -	nr = trans->blocks_used; -	ret = btrfs_end_transaction_throttle(trans, root); -	btrfs_btree_balance_dirty(root, nr); +		nr = trans->blocks_used; +		ret = btrfs_end_transaction_throttle(trans, root); +		btrfs_btree_balance_dirty(root, nr); +	}  out:  	btrfs_free_block_rsv(root, rsv); @@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)  	ei->orphan_meta_reserved = 0;  	ei->dummy_inode = 0;  	ei->in_defrag = 0; +	ei->delalloc_meta_reserved = 0;  	ei->force_compress = BTRFS_COMPRESS_NONE;  	ei->delayed_node = NULL; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 24d654ce7a0..dff29d5e151 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,  			list_add_tail(&new_edge->list[UPPER],  				      &new_node->lower);  		} +	} else { +		list_add_tail(&new_node->lower, &cache->leaves);  	}  	rb_node = tree_insert(&cache->rb_root, new_node->bytenr, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index ed11d3866af..f4190f22edf 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)  static int scrub_submit(struct scrub_dev *sdev)  {  	struct scrub_bio *sbio; -	struct bio *bio; -	int i;  	if (sdev->curr == -1)  		return 0;  	sbio = sdev->bios[sdev->curr]; - -	bio = bio_alloc(GFP_NOFS, sbio->count); -	if (!bio) -		goto nomem; - -	bio->bi_private = sbio; -	bio->bi_end_io = scrub_bio_end_io; -	bio->bi_bdev = sdev->dev->bdev; -	bio->bi_sector = 
sbio->physical >> 9; - -	for (i = 0; i < sbio->count; ++i) { -		struct page *page; -		int ret; - -		page = alloc_page(GFP_NOFS); -		if (!page) -			goto nomem; - -		ret = bio_add_page(bio, page, PAGE_SIZE, 0); -		if (!ret) { -			__free_page(page); -			goto nomem; -		} -	} -  	sbio->err = 0;  	sdev->curr = -1;  	atomic_inc(&sdev->in_flight); -	submit_bio(READ, bio); +	submit_bio(READ, sbio->bio);  	return 0; - -nomem: -	scrub_free_bio(bio); - -	return -ENOMEM;  }  static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, @@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,  		      u8 *csum, int force)  {  	struct scrub_bio *sbio; +	struct page *page; +	int ret;  again:  	/* @@ -1015,12 +985,22 @@ again:  	}  	sbio = sdev->bios[sdev->curr];  	if (sbio->count == 0) { +		struct bio *bio; +  		sbio->physical = physical;  		sbio->logical = logical; +		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); +		if (!bio) +			return -ENOMEM; + +		bio->bi_private = sbio; +		bio->bi_end_io = scrub_bio_end_io; +		bio->bi_bdev = sdev->dev->bdev; +		bio->bi_sector = sbio->physical >> 9; +		sbio->err = 0; +		sbio->bio = bio;  	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||  		   sbio->logical + sbio->count * PAGE_SIZE != logical) { -		int ret; -  		ret = scrub_submit(sdev);  		if (ret)  			return ret; @@ -1030,6 +1010,20 @@ again:  	sbio->spag[sbio->count].generation = gen;  	sbio->spag[sbio->count].have_csum = 0;  	sbio->spag[sbio->count].mirror_num = mirror_num; + +	page = alloc_page(GFP_NOFS); +	if (!page) +		return -ENOMEM; + +	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0); +	if (!ret) { +		__free_page(page); +		ret = scrub_submit(sdev); +		if (ret) +			return ret; +		goto again; +	} +  	if (csum) {  		sbio->spag[sbio->count].have_csum = 1;  		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 57080dffdfc..8bd9d6d0e07 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -197,7 +197,7 @@ static match_table_t tokens = {  	{Opt_subvolrootid, "subvolrootid=%d"},  	{Opt_defrag, "autodefrag"},  	{Opt_inode_cache, "inode_cache"}, -	{Opt_no_space_cache, "no_space_cache"}, +	{Opt_no_space_cache, "nospace_cache"},  	{Opt_recovery, "recovery"},  	{Opt_err, NULL},  }; @@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,  		token = match_token(p, tokens, args);  		switch (token) {  		case Opt_subvol: +			kfree(*subvol_name);  			*subvol_name = match_strdup(&args[0]);  			break;  		case Opt_subvolid: @@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)  	if (btrfs_test_opt(root, SPACE_CACHE))  		seq_puts(seq, ",space_cache");  	else -		seq_puts(seq, ",no_space_cache"); +		seq_puts(seq, ",nospace_cache");  	if (btrfs_test_opt(root, CLEAR_CACHE))  		seq_puts(seq, ",clear_cache");  	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) @@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  	struct super_block *s;  	struct dentry *root;  	struct btrfs_fs_devices *fs_devices = NULL; -	struct btrfs_root *tree_root = NULL;  	struct btrfs_fs_info *fs_info = NULL;  	fmode_t mode = FMODE_READ;  	char *subvol_name = NULL; @@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  	error = btrfs_parse_early_options(data, mode, fs_type,  					  &subvol_name, &subvol_objectid,  					  &subvol_rootid, &fs_devices); -	if (error) +	if (error) 
{ +		kfree(subvol_name);  		return ERR_PTR(error); +	}  	if (subvol_name) {  		root = mount_subvol(subvol_name, flags, device_name, data); @@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  	if (error)  		return ERR_PTR(error); -	error = btrfs_open_devices(fs_devices, mode, fs_type); -	if (error) -		return ERR_PTR(error); - -	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) { -		error = -EACCES; -		goto error_close_devices; -	} -  	/*  	 * Setup a dummy root and fs_info for test/set super.  This is because  	 * we don't actually fill this stuff out until open_ctree, but we need @@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  	 * then open_ctree will properly initialize everything later.  	 */  	fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); -	tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); -	if (!fs_info || !tree_root) { +	if (!fs_info) +		return ERR_PTR(-ENOMEM); + +	fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); +	if (!fs_info->tree_root) {  		error = -ENOMEM; -		goto error_close_devices; +		goto error_fs_info;  	} -	fs_info->tree_root = tree_root; +	fs_info->tree_root->fs_info = fs_info;  	fs_info->fs_devices = fs_devices; -	tree_root->fs_info = fs_info;  	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);  	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);  	if (!fs_info->super_copy || !fs_info->super_for_commit) {  		error = -ENOMEM; +		goto error_fs_info; +	} + +	error = btrfs_open_devices(fs_devices, mode, fs_type); +	if (error) +		goto error_fs_info; + +	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) { +		error = -EACCES;  		goto error_close_devices;  	}  	bdev = fs_devices->latest_bdev; -	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); +	s = sget(fs_type, btrfs_test_super, btrfs_set_super, +		 fs_info->tree_root);  	if (IS_ERR(s)) {  		error = PTR_ERR(s);  		goto error_close_devices; @@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  	if (s->s_root) {  		if ((flags ^ s->s_flags) & MS_RDONLY) {  			deactivate_locked_super(s); -			return ERR_PTR(-EBUSY); +			error = -EBUSY; +			goto error_close_devices;  		}  		btrfs_close_devices(fs_devices);  		free_fs_info(fs_info); -		kfree(tree_root);  	} else {  		char b[BDEVNAME_SIZE]; @@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,  error_close_devices:  	btrfs_close_devices(fs_devices); +error_fs_info:  	free_fs_info(fs_info); -	kfree(tree_root);  	return ERR_PTR(error);  } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 960835eaf4d..6a0574e923b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,  	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);  	if (to_reserve > 0) { -		ret = btrfs_block_rsv_add(root, &pending->block_rsv, -					  to_reserve); +		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv, +						  to_reserve);  		if (ret) {  			pending->error = ret;  			goto fail; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f8e2943101a..c37433d3cd8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,  	key.objectid = device->devid;  	key.offset = start;  	key.type = BTRFS_DEV_EXTENT_KEY; - 
+again:  	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);  	if (ret > 0) {  		ret = btrfs_previous_item(root, path, key.objectid, @@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,  					struct btrfs_dev_extent);  		BUG_ON(found_key.offset > start || found_key.offset +  		       btrfs_dev_extent_length(leaf, extent) < start); +		key = found_key; +		btrfs_release_path(path); +		goto again;  	} else if (ret == 0) {  		leaf = path->nodes[0];  		extent = btrfs_item_ptr(leaf, path->slots[0], diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c index e673a88b8ae..b1ce4c7ad3f 100644 --- a/fs/hfs/trans.c +++ b/fs/hfs/trans.c @@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)  	src = in->name;  	srclen = in->len; +	if (srclen > HFS_NAMELEN) +		srclen = HFS_NAMELEN;  	dst = out;  	dstlen = HFS_MAX_NAMELEN;  	if (nls_io) { diff --git a/fs/proc/base.c b/fs/proc/base.c index 2db1bd3173b..851ba3dcdc2 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1652,46 +1652,12 @@ out:  	return error;  } -static int proc_pid_fd_link_getattr(struct vfsmount *mnt, struct dentry *dentry, -		struct kstat *stat) -{ -	struct inode *inode = dentry->d_inode; -	struct task_struct *task = get_proc_task(inode); -	int rc; - -	if (task == NULL) -		return -ESRCH; - -	rc = -EACCES; -	if (lock_trace(task)) -		goto out_task; - -	generic_fillattr(inode, stat); -	unlock_trace(task); -	rc = 0; -out_task: -	put_task_struct(task); -	return rc; -} -  static const struct inode_operations proc_pid_link_inode_operations = {  	.readlink	= proc_pid_readlink,  	.follow_link	= proc_pid_follow_link,  	.setattr	= proc_setattr,  }; -static const struct inode_operations proc_fdinfo_link_inode_operations = { -	.setattr	= proc_setattr, -	.getattr	= proc_pid_fd_link_getattr, -}; - -static const struct inode_operations proc_fd_link_inode_operations = { -	.readlink	= proc_pid_readlink, -	.follow_link	= proc_pid_follow_link, -	.setattr	= proc_setattr, -	.getattr	= proc_pid_fd_link_getattr, -}; -  /* building an inode */ @@ -1923,61 +1889,49 @@ out:  static int proc_fd_info(struct inode *inode, struct path *path, char *info)  { -	struct task_struct *task; -	struct files_struct *files; +	struct task_struct *task = get_proc_task(inode); +	struct files_struct *files = NULL;  	struct file *file;  	int fd = proc_fd(inode); -	int rc; - -	task = get_proc_task(inode); -	if (!task) -		return -ENOENT; - -	rc = -EACCES; -	if (lock_trace(task)) -		goto out_task; - -	rc = -ENOENT; -	files = get_files_struct(task); -	if (files == NULL) -		goto out_unlock; -	/* -	 * We are not taking a ref to the file structure, so we must -	 * hold ->file_lock. -	 */ -	spin_lock(&files->file_lock); -	file = fcheck_files(files, fd); -	if (file) { -		unsigned int f_flags; -		struct fdtable *fdt; +	if (task) { +		files = get_files_struct(task); +		put_task_struct(task); +	} +	if (files) { +		/* +		 * We are not taking a ref to the file structure, so we must +		 * hold ->file_lock. 
+		 */ +		spin_lock(&files->file_lock); +		file = fcheck_files(files, fd); +		if (file) { +			unsigned int f_flags; +			struct fdtable *fdt; -		fdt = files_fdtable(files); -		f_flags = file->f_flags & ~O_CLOEXEC; -		if (FD_ISSET(fd, fdt->close_on_exec)) -			f_flags |= O_CLOEXEC; +			fdt = files_fdtable(files); +			f_flags = file->f_flags & ~O_CLOEXEC; +			if (FD_ISSET(fd, fdt->close_on_exec)) +				f_flags |= O_CLOEXEC; -		if (path) { -			*path = file->f_path; -			path_get(&file->f_path); +			if (path) { +				*path = file->f_path; +				path_get(&file->f_path); +			} +			if (info) +				snprintf(info, PROC_FDINFO_MAX, +					 "pos:\t%lli\n" +					 "flags:\t0%o\n", +					 (long long) file->f_pos, +					 f_flags); +			spin_unlock(&files->file_lock); +			put_files_struct(files); +			return 0;  		} -		if (info) -			snprintf(info, PROC_FDINFO_MAX, -				 "pos:\t%lli\n" -				 "flags:\t0%o\n", -				 (long long) file->f_pos, -				 f_flags); -		rc = 0; -	} else -		rc = -ENOENT; -	spin_unlock(&files->file_lock); -	put_files_struct(files); - -out_unlock: -	unlock_trace(task); -out_task: -	put_task_struct(task); -	return rc; +		spin_unlock(&files->file_lock); +		put_files_struct(files); +	} +	return -ENOENT;  }  static int proc_fd_link(struct inode *inode, struct path *path) @@ -2072,7 +2026,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,  	spin_unlock(&files->file_lock);  	put_files_struct(files); -	inode->i_op = &proc_fd_link_inode_operations; +	inode->i_op = &proc_pid_link_inode_operations;  	inode->i_size = 64;  	ei->op.proc_get_link = proc_fd_link;  	d_set_d_op(dentry, &tid_fd_dentry_operations); @@ -2104,12 +2058,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,  	if (fd == ~0U)  		goto out; -	result = ERR_PTR(-EACCES); -	if (lock_trace(task)) -		goto out; -  	result = instantiate(dir, dentry, task, &fd); -	unlock_trace(task);  out:  	put_task_struct(task);  out_no_task: @@ -2129,28 +2078,23 @@ static int proc_readfd_common(struct file * filp, void * dirent,  	retval = -ENOENT;  	if (!p)  		goto out_no_task; - -	retval = -EACCES; -	if (lock_trace(p)) -		goto out; -  	retval = 0;  	fd = filp->f_pos;  	switch (fd) {  		case 0:  			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) -				goto out_unlock; +				goto out;  			filp->f_pos++;  		case 1:  			ino = parent_ino(dentry);  			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) -				goto out_unlock; +				goto out;  			filp->f_pos++;  		default:  			files = get_files_struct(p);  			if (!files) -				goto out_unlock; +				goto out;  			rcu_read_lock();  			for (fd = filp->f_pos-2;  			     fd < files_fdtable(files)->max_fds; @@ -2174,9 +2118,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,  			rcu_read_unlock();  			put_files_struct(files);  	} - -out_unlock: -	unlock_trace(p);  out:  	put_task_struct(p);  out_no_task: @@ -2254,7 +2195,6 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,  	ei->fd = fd;  	inode->i_mode = S_IFREG | S_IRUSR;  	inode->i_fop = &proc_fdinfo_file_operations; -	inode->i_op = &proc_fdinfo_link_inode_operations;  	d_set_d_op(dentry, &tid_fd_dentry_operations);  	d_add(dentry, inode);  	/* Close the race of the process dying before we return the dentry */ diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 33b13310ee0..574d4ee9b62 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -189,7 +189,7 @@ xfs_end_io(  	int		error = 0;  	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { -		error = -EIO; +		ioend->io_error = -EIO;  		goto done;  	}  	if 
(ioend->io_error) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 1a3513881bc..eac97ef81e2 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -656,7 +656,7 @@ xfs_buf_item_committing(  /*   * This is the ops vector shared by all buf log items.   */ -static struct xfs_item_ops xfs_buf_item_ops = { +static const struct xfs_item_ops xfs_buf_item_ops = {  	.iop_size	= xfs_buf_item_size,  	.iop_format	= xfs_buf_item_format,  	.iop_pin	= xfs_buf_item_pin, diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index bb3f71d236d..0dee0b71029 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -295,7 +295,7 @@ xfs_qm_dquot_logitem_committing(  /*   * This is the ops vector for dquots   */ -static struct xfs_item_ops xfs_dquot_item_ops = { +static const struct xfs_item_ops xfs_dquot_item_ops = {  	.iop_size	= xfs_qm_dquot_logitem_size,  	.iop_format	= xfs_qm_dquot_logitem_format,  	.iop_pin	= xfs_qm_dquot_logitem_pin, @@ -483,7 +483,7 @@ xfs_qm_qoff_logitem_committing(  {  } -static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { +static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {  	.iop_size	= xfs_qm_qoff_logitem_size,  	.iop_format	= xfs_qm_qoff_logitem_format,  	.iop_pin	= xfs_qm_qoff_logitem_pin, @@ -498,7 +498,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {  /*   * This is the ops vector shared by all quotaoff-start log items.   */ -static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { +static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {  	.iop_size	= xfs_qm_qoff_logitem_size,  	.iop_format	= xfs_qm_qoff_logitem_format,  	.iop_pin	= xfs_qm_qoff_logitem_pin, diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index d22e6262343..35c2aff38b2 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -217,7 +217,7 @@ xfs_efi_item_committing(  /*   * This is the ops vector shared by all efi log items.   */ -static struct xfs_item_ops xfs_efi_item_ops = { +static const struct xfs_item_ops xfs_efi_item_ops = {  	.iop_size	= xfs_efi_item_size,  	.iop_format	= xfs_efi_item_format,  	.iop_pin	= xfs_efi_item_pin, @@ -477,7 +477,7 @@ xfs_efd_item_committing(  /*   * This is the ops vector shared by all efd log items.   */ -static struct xfs_item_ops xfs_efd_item_ops = { +static const struct xfs_item_ops xfs_efd_item_ops = {  	.iop_size	= xfs_efd_item_size,  	.iop_format	= xfs_efd_item_format,  	.iop_pin	= xfs_efd_item_pin, diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index b7cf21ba240..abaafdbb3e6 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -795,7 +795,7 @@ xfs_inode_item_committing(  /*   * This is the ops vector shared by all buf log items.   
*/ -static struct xfs_item_ops xfs_inode_item_ops = { +static const struct xfs_item_ops xfs_inode_item_ops = {  	.iop_size	= xfs_inode_item_size,  	.iop_format	= xfs_inode_item_format,  	.iop_pin	= xfs_inode_item_pin, diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 2758a6277c5..a14cd89fe46 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -626,7 +626,7 @@ xfs_log_item_init(  	struct xfs_mount	*mp,  	struct xfs_log_item	*item,  	int			type, -	struct xfs_item_ops	*ops) +	const struct xfs_item_ops *ops)  {  	item->li_mountp = mp;  	item->li_ailp = mp->m_ail; diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 78c9039994a..3f7bf451c03 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -137,7 +137,7 @@ struct xfs_trans;  void	xfs_log_item_init(struct xfs_mount	*mp,  			struct xfs_log_item	*item,  			int			type, -			struct xfs_item_ops	*ops); +			const struct xfs_item_ops *ops);  xfs_lsn_t xfs_log_done(struct xfs_mount *mp,  		       struct xlog_ticket *ticket, diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 603f3eb5204..3ae713c0abd 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -326,7 +326,7 @@ typedef struct xfs_log_item {  						 struct xfs_log_item *);  							/* buffer item iodone */  							/* callback func */ -	struct xfs_item_ops		*li_ops;	/* function list */ +	const struct xfs_item_ops	*li_ops;	/* function list */  	/* delayed logging */  	struct list_head		li_cil;		/* CIL pointers */ @@ -341,7 +341,7 @@ typedef struct xfs_log_item {  	{ XFS_LI_IN_AIL,	"IN_AIL" }, \  	{ XFS_LI_ABORTED,	"ABORTED" } -typedef struct xfs_item_ops { +struct xfs_item_ops {  	uint (*iop_size)(xfs_log_item_t *);  	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);  	void (*iop_pin)(xfs_log_item_t *); @@ -352,7 +352,7 @@ typedef struct xfs_item_ops {  	void (*iop_push)(xfs_log_item_t *);  	bool (*iop_pushbuf)(xfs_log_item_t *);  	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); -} xfs_item_ops_t; +};  #define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)  #define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp) diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 4ecf2a54906..ce9268a2f56 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -112,7 +112,7 @@ xfs_readlink(  	char		*link)  {  	xfs_mount_t	*mp = ip->i_mount; -	int		pathlen; +	xfs_fsize_t	pathlen;  	int		error = 0;  	trace_xfs_readlink(ip); @@ -122,13 +122,19 @@ xfs_readlink(  	xfs_ilock(ip, XFS_ILOCK_SHARED); -	ASSERT(S_ISLNK(ip->i_d.di_mode)); -	ASSERT(ip->i_d.di_size <= MAXPATHLEN); -  	pathlen = ip->i_d.di_size;  	if (!pathlen)  		goto out; +	if (pathlen < 0 || pathlen > MAXPATHLEN) { +		xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)", +			 __func__, (unsigned long long) ip->i_ino, +			 (long long) pathlen); +		ASSERT(0); +		return XFS_ERROR(EFSCORRUPTED); +	} + +  	if (ip->i_df.if_flags & XFS_IFINLINE) {  		memcpy(link, ip->i_df.if_u1.if_data, pathlen);  		link[pathlen] = '\0';  |