Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--   fs/proc/task_mmu.c | 108
1 files changed, 72 insertions, 36 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e73314afc53..7c708a418ac 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
@@ -7,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -249,8 +251,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 		const char *name = arch_vma_name(vma);
 		if (!name) {
 			if (mm) {
-				if (vma->vm_start <= mm->start_brk &&
-						vma->vm_end >= mm->brk) {
+				if (vma->vm_start <= mm->brk &&
+						vma->vm_end >= mm->start_brk) {
 					name = "[heap]";
 				} else if (vma->vm_start <= mm->start_stack &&
 					   vma->vm_end >= mm->start_stack) {
@@ -330,58 +332,86 @@ struct mem_size_stats {
 	unsigned long private_dirty;
 	unsigned long referenced;
 	unsigned long anonymous;
+	unsigned long anonymous_thp;
 	unsigned long swap;
 	u64 pss;
 };
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			   struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+		unsigned long ptent_size, struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
 	struct page *page;
 	int mapcount;
 
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
+	if (is_swap_pte(ptent)) {
+		mss->swap += ptent_size;
+		return;
+	}
 
-		if (is_swap_pte(ptent)) {
-			mss->swap += PAGE_SIZE;
-			continue;
-		}
+	if (!pte_present(ptent))
+		return;
 
-		if (!pte_present(ptent))
-			continue;
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page)
+		return;
 
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page)
-			continue;
+	if (PageAnon(page))
+		mss->anonymous += ptent_size;
 
-		if (PageAnon(page))
-			mss->anonymous += PAGE_SIZE;
+	mss->resident += ptent_size;
+	/* Accumulate the size in pages that have been accessed. */
+	if (pte_young(ptent) || PageReferenced(page))
+		mss->referenced += ptent_size;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->shared_dirty += ptent_size;
+		else
+			mss->shared_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+	} else {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->private_dirty += ptent_size;
+		else
+			mss->private_clean += ptent_size;
+		mss->pss += (ptent_size << PSS_SHIFT);
+	}
+}
+
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			   struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	pte_t *pte;
+	spinlock_t *ptl;
 
-		mss->resident += PAGE_SIZE;
-		/* Accumulate the size in pages that have been accessed. */
-		if (pte_young(ptent) || PageReferenced(page))
-			mss->referenced += PAGE_SIZE;
-		mapcount = page_mapcount(page);
-		if (mapcount >= 2) {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->shared_dirty += PAGE_SIZE;
-			else
-				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+	spin_lock(&walk->mm->page_table_lock);
+	if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_splitting(*pmd)) {
+			spin_unlock(&walk->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
 		} else {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->private_dirty += PAGE_SIZE;
-			else
-				mss->private_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT);
+			smaps_pte_entry(*(pte_t *)pmd, addr,
+					HPAGE_PMD_SIZE, walk);
+			spin_unlock(&walk->mm->page_table_lock);
+			mss->anonymous_thp += HPAGE_PMD_SIZE;
+			return 0;
 		}
+	} else {
+		spin_unlock(&walk->mm->page_table_lock);
 	}
+	/*
+	 * The mmap_sem held all the way back in m_start() is what
+	 * keeps khugepaged out of here and from collapsing things
+	 * in here.
	 */
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
@@ -417,6 +447,7 @@ static int show_smap(struct seq_file *m, void *v)
 		   "Private_Dirty:  %8lu kB\n"
 		   "Referenced:     %8lu kB\n"
 		   "Anonymous:      %8lu kB\n"
+		   "AnonHugePages:  %8lu kB\n"
 		   "Swap:           %8lu kB\n"
 		   "KernelPageSize: %8lu kB\n"
 		   "MMUPageSize:    %8lu kB\n"
@@ -430,6 +461,7 @@ static int show_smap(struct seq_file *m, void *v)
 		   mss.private_dirty >> 10,
 		   mss.referenced >> 10,
 		   mss.anonymous >> 10,
+		   mss.anonymous_thp >> 10,
 		   mss.swap >> 10,
 		   vma_kernel_pagesize(vma) >> 10,
 		   vma_mmu_pagesize(vma) >> 10,
@@ -469,6 +501,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -625,6 +659,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	int err = 0;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
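
For context, the show_smap() hunks above add an "AnonHugePages:  %8lu kB" line per VMA in /proc/<pid>/smaps, fed from the new mss->anonymous_thp counter. Below is a minimal userspace sketch, not part of this commit, that sums those lines for the current process; only the field name and format come from the patch, the rest is illustrative.

/* Illustrative example only (not part of this commit): sum the
 * AnonHugePages lines that the patched show_smap() emits. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/smaps", "r");
	char line[256];
	unsigned long kb, total_kb = 0;

	if (!f) {
		perror("fopen /proc/self/smaps");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches the "AnonHugePages:  %8lu kB" format string added above. */
		if (sscanf(line, "AnonHugePages: %lu kB", &kb) == 1)
			total_kb += kb;
	}
	fclose(f);
	printf("AnonHugePages total: %lu kB\n", total_kb);
	return 0;
}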