diff options
Diffstat (limited to 'mm/shmem.c')
| -rw-r--r-- | mm/shmem.c | 51 | 
1 file changed, 29 insertions, 22 deletions
diff --git a/mm/shmem.c b/mm/shmem.c index d221a1cfd7b..f1714758ea9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)  	} while (next);  } -static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) +void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)  {  	struct shmem_inode_info *info = SHMEM_I(inode);  	unsigned long idx; @@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)  	spinlock_t *punch_lock;  	unsigned long upper_limit; +	truncate_inode_pages_range(inode->i_mapping, start, end); +  	inode->i_ctime = inode->i_mtime = CURRENT_TIME;  	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;  	if (idx >= info->next_index) @@ -738,16 +740,8 @@ done2:  		 * lowered next_index.  Also, though shmem_getpage checks  		 * i_size before adding to cache, no recheck after: so fix the  		 * narrow window there too. -		 * -		 * Recalling truncate_inode_pages_range and unmap_mapping_range -		 * every time for punch_hole (which never got a chance to clear -		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive, -		 * yet hardly ever necessary: try to optimize them out later.  		 
*/  		truncate_inode_pages_range(inode->i_mapping, start, end); -		if (punch_hole) -			unmap_mapping_range(inode->i_mapping, start, -							end - start, 1);  	}  	spin_lock(&info->lock); @@ -766,22 +760,23 @@ done2:  		shmem_free_pages(pages_to_free.next);  	}  } +EXPORT_SYMBOL_GPL(shmem_truncate_range); -static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) +static int shmem_setattr(struct dentry *dentry, struct iattr *attr)  {  	struct inode *inode = dentry->d_inode; -	loff_t newsize = attr->ia_size;  	int error;  	error = inode_change_ok(inode, attr);  	if (error)  		return error; -	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE) -					&& newsize != inode->i_size) { +	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { +		loff_t oldsize = inode->i_size; +		loff_t newsize = attr->ia_size;  		struct page *page = NULL; -		if (newsize < inode->i_size) { +		if (newsize < oldsize) {  			/*  			 * If truncating down to a partial page, then  			 * if that page is already allocated, hold it @@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)  				spin_unlock(&info->lock);  			}  		} - -		/* XXX(truncate): truncate_setsize should be called last */ -		truncate_setsize(inode, newsize); +		if (newsize != oldsize) { +			i_size_write(inode, newsize); +			inode->i_ctime = inode->i_mtime = CURRENT_TIME; +		} +		if (newsize < oldsize) { +			loff_t holebegin = round_up(newsize, PAGE_SIZE); +			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); +			shmem_truncate_range(inode, newsize, (loff_t)-1); +			/* unmap again to remove racily COWed private pages */ +			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); +		}  		if (page)  			page_cache_release(page); -		shmem_truncate_range(inode, newsize, (loff_t)-1);  	}  	setattr_copy(inode, attr); @@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)  	struct shmem_xattr *xattr, *nxattr;  	if (inode->i_mapping->a_ops == 
&shmem_aops) { -		truncate_inode_pages(inode->i_mapping, 0);  		shmem_unacct_size(info->flags, inode->i_size);  		inode->i_size = 0;  		shmem_truncate_range(inode, 0, (loff_t)-1); @@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {  };  static const struct inode_operations shmem_inode_operations = { -	.setattr	= shmem_notify_change, +	.setattr	= shmem_setattr,  	.truncate_range	= shmem_truncate_range,  #ifdef CONFIG_TMPFS_XATTR  	.setxattr	= shmem_setxattr, @@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {  	.removexattr	= shmem_removexattr,  #endif  #ifdef CONFIG_TMPFS_POSIX_ACL -	.setattr	= shmem_notify_change, +	.setattr	= shmem_setattr,  	.check_acl	= generic_check_acl,  #endif  }; @@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {  	.removexattr	= shmem_removexattr,  #endif  #ifdef CONFIG_TMPFS_POSIX_ACL -	.setattr	= shmem_notify_change, +	.setattr	= shmem_setattr,  	.check_acl	= generic_check_acl,  #endif  }; @@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)  	return 0;  } +void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) +{ +	truncate_inode_pages_range(inode->i_mapping, start, end); +} +EXPORT_SYMBOL_GPL(shmem_truncate_range); +  #ifdef CONFIG_CGROUP_MEM_RES_CTLR  /**   * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file  |