Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3e29781ee76..dbe99a5f207 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
 
 static inline struct page *alloc_hugepage_vma(int defrag,
 					      struct vm_area_struct *vma,
-					      unsigned long haddr)
+					      unsigned long haddr, int nd)
 {
 	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-			       HPAGE_PMD_ORDER, vma, haddr);
+			       HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(khugepaged_enter(vma)))
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					  vma, haddr);
+					  vma, haddr, numa_node_id());
 		if (unlikely(!page))
 			goto out;
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-					  vma, address);
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_newpage_charge(pages[i], mm,
 						       GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr);
+					      vma, haddr, numa_node_id());
 	else
 		new_page = NULL;
 
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 static void collapse_huge_page(struct mm_struct *mm,
 			       unsigned long address,
 			       struct page **hpage,
-			       struct vm_area_struct *vma)
+			       struct vm_area_struct *vma,
+			       int node)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -1773,7 +1774,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+				      node);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
 		*hpage = ERR_PTR(-ENOMEM);
@@ -1919,6 +1921,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	struct page *page;
 	unsigned long _address;
 	spinlock_t *ptl;
+	int node = -1;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1949,6 +1952,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		page = vm_normal_page(vma, _address, pteval);
 		if (unlikely(!page))
 			goto out_unmap;
+		/*
+		 * Chose the node of the first page. This could
+		 * be more sophisticated and look at more pages,
+		 * but isn't for now.
+		 */
+		if (node == -1)
+			node = page_to_nid(page);
 		VM_BUG_ON(PageCompound(page));
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
@@ -1965,7 +1975,7 @@ out_unmap:
 	pte_unmap_unlock(pte, ptl);
 	if (ret)
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, vma);
+		collapse_huge_page(mm, address, hpage, vma, node);
 out:
 	return ret;
 }
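The core of the change is the node-selection heuristic added to khugepaged_scan_pmd(): remember the NUMA node of the first normal page found in the PMD range and pass it down so the collapsed huge page is allocated on that node. Below is a minimal, self-contained sketch of that heuristic in plain C, assuming a simplified model of the scan; pick_collapse_node() and page_node() are invented for illustration and are not kernel APIs, and unlike the real scan this sketch skips unsuitable ptes instead of aborting.

/*
 * Standalone sketch (not kernel code) of the node-selection heuristic
 * this patch adds to khugepaged_scan_pmd(): take the NUMA node of the
 * first mapped page in the PMD range as the allocation target for the
 * collapsed huge page.  page_node() and pick_collapse_node() are
 * hypothetical helpers for illustration only.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* ptes covered by one PMD on x86-64 */

/* stand-in for vm_normal_page() + page_to_nid(); -1 means no page */
static int page_node(int pte_index)
{
	/* pretend the first few ptes are unmapped, the rest live on node 1 */
	return pte_index < 8 ? -1 : 1;
}

static int pick_collapse_node(void)
{
	int node = -1;	/* same "not chosen yet" sentinel as the patch */
	int i;

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		int nid = page_node(i);

		if (nid < 0)
			continue;	/* no page here; the sketch just skips it */
		/* choose the node of the first page; later pages are ignored */
		if (node == -1)
			node = nid;
	}
	return node;	/* the patch passes this to collapse_huge_page() */
}

int main(void)
{
	printf("collapse target node: %d\n", pick_collapse_node());
	return 0;
}

In the patch itself the chosen node then flows through collapse_huge_page() into alloc_hugepage_vma(), so the huge page is allocated on the node where the small pages already reside rather than on whichever node khugepaged happens to be running.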