diff options
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 81
1 file changed, 56 insertions, 25 deletions
diff --git a/mm/mmap.c b/mm/mmap.c index e7a5a68a9c2..74f4d158022 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -410,7 +410,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,  	rb_insert_color(&vma->vm_rb, &mm->mm_rb);  } -static inline void __vma_link_file(struct vm_area_struct *vma) +static void __vma_link_file(struct vm_area_struct *vma)  {  	struct file * file; @@ -662,8 +662,6 @@ again:			remove_next = 1 + (end > next->vm_end);   * If the vma has a ->close operation then the driver probably needs to release   * per-vma resources, so we don't attempt to merge those.   */ -#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) -  static inline int is_mergeable_vma(struct vm_area_struct *vma,  			struct file *file, unsigned long vm_flags)  { @@ -972,6 +970,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,  			return -EPERM;  		vm_flags |= VM_LOCKED;  	} +  	/* mlock MCL_FUTURE? */  	if (vm_flags & VM_LOCKED) {  		unsigned long locked, lock_limit; @@ -1139,10 +1138,12 @@ munmap_back:  	 * The VM_SHARED test is necessary because shmem_zero_setup  	 * will create the file object for a shared anonymous map below.  	 
*/ -	if (!file && !(vm_flags & VM_SHARED) && -	    vma_merge(mm, prev, addr, addr + len, vm_flags, -					NULL, NULL, pgoff, NULL)) -		goto out; +	if (!file && !(vm_flags & VM_SHARED)) { +		vma = vma_merge(mm, prev, addr, addr + len, vm_flags, +					NULL, NULL, pgoff, NULL); +		if (vma) +			goto out; +	}  	/*  	 * Determine the object being mapped and call the appropriate @@ -1224,10 +1225,14 @@ out:  	mm->total_vm += len >> PAGE_SHIFT;  	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);  	if (vm_flags & VM_LOCKED) { -		mm->locked_vm += len >> PAGE_SHIFT; -		make_pages_present(addr, addr + len); -	} -	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK)) +		/* +		 * makes pages present; downgrades, drops, reacquires mmap_sem +		 */ +		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len); +		if (nr_pages < 0) +			return nr_pages;	/* vma gone! */ +		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages; +	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))  		make_pages_present(addr, addr + len);  	return addr; @@ -1586,7 +1591,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un   * vma is the last one with address > vma->vm_end.  Have to extend vma.   */  #ifndef CONFIG_IA64 -static inline +static  #endif  int expand_upwards(struct vm_area_struct *vma, unsigned long address)  { @@ -1636,7 +1641,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)  /*   * vma is the first one with address < vma->vm_start.  Have to extend vma.   
*/ -static inline int expand_downwards(struct vm_area_struct *vma, +static int expand_downwards(struct vm_area_struct *vma,  				   unsigned long address)  {  	int error; @@ -1698,10 +1703,12 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)  	vma = find_vma_prev(mm, addr, &prev);  	if (vma && (vma->vm_start <= addr))  		return vma; -	if (!prev || expand_stack(prev, addr)) +	if (expand_stack(prev, addr))  		return NULL; -	if (prev->vm_flags & VM_LOCKED) -		make_pages_present(addr, prev->vm_end); +	if (prev->vm_flags & VM_LOCKED) { +		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0) +			return NULL;	/* vma gone! */ +	}  	return prev;  }  #else @@ -1727,8 +1734,10 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)  	start = vma->vm_start;  	if (expand_stack(vma, addr))  		return NULL; -	if (vma->vm_flags & VM_LOCKED) -		make_pages_present(addr, start); +	if (vma->vm_flags & VM_LOCKED) { +		if (mlock_vma_pages_range(vma, addr, start) < 0) +			return NULL;	/* vma gone! */ +	}  	return vma;  }  #endif @@ -1747,8 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)  		long nrpages = vma_pages(vma);  		mm->total_vm -= nrpages; -		if (vma->vm_flags & VM_LOCKED) -			mm->locked_vm -= nrpages;  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);  		vma = remove_vma(vma);  	} while (vma); @@ -1914,6 +1921,20 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)  	vma = prev? 
prev->vm_next: mm->mmap;  	/* +	 * unlock any mlock()ed ranges before detaching vmas +	 */ +	if (mm->locked_vm) { +		struct vm_area_struct *tmp = vma; +		while (tmp && tmp->vm_start < end) { +			if (tmp->vm_flags & VM_LOCKED) { +				mm->locked_vm -= vma_pages(tmp); +				munlock_vma_pages_all(tmp); +			} +			tmp = tmp->vm_next; +		} +	} + +	/*  	 * Remove the vma's, and unmap the actual pages  	 */  	detach_vmas_to_be_unmapped(mm, vma, prev, end); @@ -2025,8 +2046,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)  		return -ENOMEM;  	/* Can we just expand an old private anonymous mapping? */ -	if (vma_merge(mm, prev, addr, addr + len, flags, -					NULL, NULL, pgoff, NULL)) +	vma = vma_merge(mm, prev, addr, addr + len, flags, +					NULL, NULL, pgoff, NULL); +	if (vma)  		goto out;  	/* @@ -2048,8 +2070,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)  out:  	mm->total_vm += len >> PAGE_SHIFT;  	if (flags & VM_LOCKED) { -		mm->locked_vm += len >> PAGE_SHIFT; -		make_pages_present(addr, addr + len); +		if (!mlock_vma_pages_range(vma, addr, addr + len)) +			mm->locked_vm += (len >> PAGE_SHIFT);  	}  	return addr;  } @@ -2060,7 +2082,7 @@ EXPORT_SYMBOL(do_brk);  void exit_mmap(struct mm_struct *mm)  {  	struct mmu_gather *tlb; -	struct vm_area_struct *vma = mm->mmap; +	struct vm_area_struct *vma;  	unsigned long nr_accounted = 0;  	unsigned long end; @@ -2068,6 +2090,15 @@ void exit_mmap(struct mm_struct *mm)  	arch_exit_mmap(mm);  	mmu_notifier_release(mm); +	if (mm->locked_vm) { +		vma = mm->mmap; +		while (vma) { +			if (vma->vm_flags & VM_LOCKED) +				munlock_vma_pages_all(vma); +			vma = vma->vm_next; +		} +	} +	vma = mm->mmap;  	lru_add_drain();  	flush_cache_mm(mm);  	tlb = tlb_gather_mmu(mm, 1);  |