Diffstat (limited to 'mm')
-rw-r--r--   mm/hugetlb.c   12
-rw-r--r--   mm/memory.c    48
-rw-r--r--   mm/mmap.c       6
-rw-r--r--   mm/nommu.c      2
-rw-r--r--   mm/vmscan.c     2
5 files changed, 64 insertions, 6 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ca9a7c6d7e9..1a12f5b9a0a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			break;
 		}
 
-		if (absent ||
+		/*
+		 * We need call hugetlb_fault for both hugepages under migration
+		 * (in which case hugetlb_fault waits for the migration,) and
+		 * hwpoisoned hugepages (in which case we need to prevent the
+		 * caller from accessing to them.) In order to do this, we use
+		 * here is_swap_pte instead of is_hugetlb_entry_migration and
+		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
+		 * both cases, and because we can't follow correct pages
+		 * directly from any kind of swap entries.
+		 */
+		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
diff --git a/mm/memory.c b/mm/memory.c
index 494526ae024..ba94dec5b25 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm     = fullmm;
+	tlb->need_flush_all = 0;
 	tlb->start	= -1UL;
 	tlb->end	= 0;
 	tlb->need_flush = 0;
@@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * whatever write-combining details or similar.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+	unsigned long vm_len, pfn, pages;
+
+	/* Check that the physical memory area passed in looks valid */
+	if (start + len < start)
+		return -EINVAL;
+	/*
+	 * You *really* shouldn't map things that aren't page-aligned,
+	 * but we've historically allowed it because IO memory might
+	 * just have smaller alignment.
+	 */
+	len += start & ~PAGE_MASK;
+	pfn = start >> PAGE_SHIFT;
+	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+	if (pfn + pages < pfn)
+		return -EINVAL;
+
+	/* We start the mapping 'vm_pgoff' pages into the area */
+	if (vma->vm_pgoff > pages)
+		return -EINVAL;
+	pfn += vma->vm_pgoff;
+	pages -= vma->vm_pgoff;
+
+	/* Can we fit all of the mapping? */
+	vm_len = vma->vm_end - vma->vm_start;
+	if (vm_len >> PAGE_SHIFT > pages)
+		return -EINVAL;
+
+	/* Ok, let it rip */
+	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				     unsigned long addr, unsigned long end,
 				     pte_fn_t fn, void *data)
diff --git a/mm/mmap.c b/mm/mmap.c
index 6466699b16c..033094ba62d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 	/* Check the cache first. */
 	/* (Cache hit rate is typically around 35%.) */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
 		struct rb_node *rb_node;
 
@@ -2305,7 +2305,7 @@ static void unmap_region(struct mm_struct *mm,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-				 next ? next->vm_start : 0);
+				 next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, start, end);
 }
 
@@ -2685,7 +2685,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*
diff --git a/mm/nommu.c b/mm/nommu.c
index e1932808753..2f3ea749c31 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	/* check the cache first */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 		return vma;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 88c5fed8b9a..669fba39be1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid)
 	if (IS_ERR(pgdat->kswapd)) {
 		/* failure at boot is fatal */
 		BUG_ON(system_state == SYSTEM_BOOTING);
-		pgdat->kswapd = NULL;
 		pr_err("Failed to start kswapd on node %d\n", nid);
 		ret = PTR_ERR(pgdat->kswapd);
+		pgdat->kswapd = NULL;
 	}
 	return ret;
 }
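As a rough illustration of how a driver might consume the vm_iomap_memory() helper exported above, the sketch below shows a hypothetical character-device ->mmap handler. The driver name and the MYDRV_REG_BASE/MYDRV_REG_SIZE constants are made up for the example and are not part of this commit.

#include <linux/fs.h>
#include <linux/mm.h>

/* Made-up MMIO window, for illustration only. */
#define MYDRV_REG_BASE	0xfe000000UL
#define MYDRV_REG_SIZE	0x00010000UL

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Device registers should not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * Hand the whole physical window to vm_iomap_memory(); it derives
	 * the pfn, honours vma->vm_pgoff and rejects mappings that do not
	 * fit, so the driver no longer open-codes io_remap_pfn_range().
	 */
	return vm_iomap_memory(vma, MYDRV_REG_BASE, MYDRV_REG_SIZE);
}

A driver would wire this up through the .mmap member of its struct file_operations; the overflow and vm_pgoff checks that each driver used to duplicate now live in one place.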
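The rounding arithmetic inside vm_iomap_memory() (len += start & ~PAGE_MASK; pages = (len + ~PAGE_MASK) >> PAGE_SHIFT) widens an unaligned request to whole pages. A small standalone sketch with made-up numbers, assuming 4 KiB pages (not kernel code), shows the effect:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long start = 0x10000234UL;	/* made-up, not page aligned */
	unsigned long len   = 0x1000UL;		/* one page worth of bytes */
	unsigned long pfn, pages;

	len  += start & ~PAGE_MASK;		  /* add start's offset within its page */
	pfn   = start >> PAGE_SHIFT;		  /* first page frame touched */
	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; /* round the length up to whole pages */

	/* Prints pfn=0x10000 pages=2: the unaligned request spans two 4 KiB pages. */
	printf("pfn=%#lx pages=%lu\n", pfn, pages);
	return 0;
}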