| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 15:08:12 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 15:08:12 -0800 |
| commit | 673ab8783b596cda5b616b317b1a1b47480c66fd | |
| tree | d3fc9bb4279720c53d0dc69c2a34c40635cf05f3 /mm/mprotect.c | |
| parent | d7b96ca5d08a8f2f836feb2b3b3bd721d2837a8e | |
| parent | 3cf23841b4b76eb94d3f8d0fb3627690e4431413 | |
Merge branch 'akpm' (more patches from Andrew)
Merge patches from Andrew Morton:
 "Most of the rest of MM, plus a few dribs and drabs.
  I still have quite a few irritating patches left around: ones with
  dubious testing results, lack of review, ones which should have gone
  via maintainer trees but the maintainers are slack, etc.
  I need to be more activist in getting these things wrapped up outside
  the merge window, but they're such a PITA."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (48 commits)
  mm/vmscan.c: avoid possible deadlock caused by too_many_isolated()
  vmscan: comment too_many_isolated()
  mm/kmemleak.c: remove obsolete simple_strtoul
  mm/memory_hotplug.c: improve comments
  mm/hugetlb: create hugetlb cgroup file in hugetlb_init
  mm/mprotect.c: coding-style cleanups
  Documentation: ABI: /sys/devices/system/node/
  slub: drop mutex before deleting sysfs entry
  memcg: add comments clarifying aspects of cache attribute propagation
  kmem: add slab-specific documentation about the kmem controller
  slub: slub-specific propagation changes
  slab: propagate tunable values
  memcg: aggregate memcg cache values in slabinfo
  memcg/sl[au]b: shrink dead caches
  memcg/sl[au]b: track all the memcg children of a kmem_cache
  memcg: destroy memcg caches
  sl[au]b: allocate objects from memcg cache
  sl[au]b: always get the cache from its page in kmem_cache_free()
  memcg: skip memcg kmem allocations in specified code regions
  memcg: infrastructure to match an allocation to the right cache
  ...
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 3dca970367d..94722a4d6b4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -114,7 +114,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 #ifdef CONFIG_NUMA_BALANCING
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmd)
+				       pmd_t *pmd)
 {
 	spin_lock(&mm->page_table_lock);
 	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
@@ -122,15 +122,15 @@ static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
 }
 #else
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmd)
+				       pmd_t *pmd)
 {
 	BUG();
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+		pud_t *pud, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -143,7 +143,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma, addr, pmd);
-			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
+			else if (change_huge_pmd(vma, pmd, addr, newprot,
+						 prot_numa)) {
 				pages += HPAGE_PMD_NR;
 				continue;
 			}
@@ -167,9 +168,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 	return pages;
 }
 
-static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pud_range(struct vm_area_struct *vma,
+		pgd_t *pgd, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -304,7 +305,8 @@ success:
 		dirty_accountable = 1;
 	}
 
-	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);
+	change_protection(vma, start, end, vma->vm_page_prot,
+			  dirty_accountable, 0);
 
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
@@ -361,8 +363,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 		error = -EINVAL;
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto out;
-	}
-	else {
+	} else {
 		if (vma->vm_start > start)
 			goto out;
 		if (unlikely(grows & PROT_GROWSUP)) {
@@ -378,9 +379,10 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
-		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
+		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
-		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+		newflags = vm_flags;
+		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
 		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
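The `newflags >> 4` test touched by the last hunk works because each VM_MAY* flag sits exactly four bits above the VM_* permission it authorizes, so the shift lines the "maximum allowed" bits up under the requested ones. Below is a minimal user-space sketch of that check; the flag values are copied to mirror include/linux/mm.h, but the standalone program itself is illustrative only, not kernel code.

```c
#include <stdio.h>

/* Flag values mirroring include/linux/mm.h (illustrative copies). */
#define VM_READ     0x00000001UL
#define VM_WRITE    0x00000002UL
#define VM_EXEC     0x00000004UL
#define VM_MAYREAD  0x00000010UL   /* VM_MAYREAD  >> 4 == VM_READ  */
#define VM_MAYWRITE 0x00000020UL   /* VM_MAYWRITE >> 4 == VM_WRITE */
#define VM_MAYEXEC  0x00000040UL   /* VM_MAYEXEC  >> 4 == VM_EXEC  */

int main(void)
{
	/* A VMA that may be read and executed, but never written. */
	unsigned long vm_flags = VM_MAYREAD | VM_MAYEXEC | VM_READ;

	/* mprotect() asks for PROT_READ | PROT_WRITE on it. */
	unsigned long newflags = vm_flags | VM_READ | VM_WRITE;

	/*
	 * Shifting right by four maps every VM_MAY* bit onto the VM_* bit
	 * it permits; any requested VM_* bit whose VM_MAY* partner is
	 * missing survives the mask, and the request must be rejected
	 * (mprotect() returns -EACCES in that case).
	 */
	if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC))
		printf("denied: write requested but VM_MAYWRITE not set\n");
	else
		printf("allowed\n");

	return 0;
}
```

Splitting `newflags = vm_flags | (...)` into two statements in the hunk above changes nothing about this logic; it only keeps the line within the kernel's preferred width.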