Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb95..6402458fee3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 */
 	chg = vma_needs_reservation(h, vma, addr);
 	if (chg < 0)
-		return ERR_PTR(chg);
+		return ERR_PTR(-VM_FAULT_OOM);
 	if (chg)
 		if (hugetlb_get_quota(inode->i_mapping, chg))
-			return ERR_PTR(-ENOSPC);
+			return ERR_PTR(-VM_FAULT_SIGBUS);
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long sz = huge_page_size(h);
 
 	/*
-	 * A page gathering list, protected by per file i_mmap_lock. The
+	 * A page gathering list, protected by per file i_mmap_mutex. The
 	 * lock is used to avoid list corruption from multiple unmapping
 	 * of the same page since we are using page->lru.
 	 */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	__unmap_hugepage_range(vma, start, end, ref_page);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 				address, address + huge_page_size(h),
 				page);
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 
 	return 1;
 }
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += huge_page_size(h)) {
 		ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
 	flush_tlb_range(vma, start, end);
 }
@@ -2833,7 +2833,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
 					struct vm_area_struct *vma,
-					int acctflag)
+					vm_flags_t vm_flags)
 {
 	long ret, chg;
 	struct hstate *h = hstate_inode(inode);
@@ -2843,7 +2843,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * attempt will be made for VM_NORESERVE to allocate a page
 	 * and filesystem quota without using reserves
 	 */
-	if (acctflag & VM_NORESERVE)
+	if (vm_flags & VM_NORESERVE)
 		return 0;
 
 	/*