diff options
-rw-r--r--  fs/proc/task_mmu.c      |  6
-rw-r--r--  include/linux/hugetlb.h |  3
-rw-r--r--  mm/hugetlb.c            | 16
3 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3a8bdd7f575..41ef5f23e77 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -396,7 +396,8 @@ static int show_smap(struct seq_file *m, void *v)
 		   "Private_Clean:  %8lu kB\n"
 		   "Private_Dirty:  %8lu kB\n"
 		   "Referenced:     %8lu kB\n"
-		   "Swap:           %8lu kB\n",
+		   "Swap:           %8lu kB\n"
+		   "KernelPageSize: %8lu kB\n",
 		   (vma->vm_end - vma->vm_start) >> 10,
 		   mss.resident >> 10,
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -405,7 +406,8 @@ static int show_smap(struct seq_file *m, void *v)
 		   mss.private_clean >> 10,
 		   mss.private_dirty >> 10,
 		   mss.referenced >> 10,
-		   mss.swap >> 10);
+		   mss.swap >> 10,
+		   vma_kernel_pagesize(vma) >> 10);
 
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e1c8afc002c..648e1e25979 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -233,6 +233,8 @@ static inline unsigned long huge_page_size(struct hstate *h)
 	return (unsigned long)PAGE_SIZE << h->order;
 }
 
+extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
+
 static inline unsigned long huge_page_mask(struct hstate *h)
 {
 	return h->mask;
@@ -273,6 +275,7 @@ struct hstate {};
 #define hstate_inode(i) NULL
 #define huge_page_size(h) PAGE_SIZE
 #define huge_page_mask(h) PAGE_MASK
+#define vma_kernel_pagesize(v) PAGE_SIZE
 #define huge_page_order(h) 0
 #define huge_page_shift(h) PAGE_SHIFT
 static inline unsigned int pages_per_huge_page(struct hstate *h)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6058b53dcb8..5cb8bc7c80f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -220,6 +220,22 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
 }
 
 /*
+ * Return the size of the pages allocated when backing a VMA. In the majority
+ * cases this will be same size as used by the page table entries.
+ */
+unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+	struct hstate *hstate;
+
+	if (!is_vm_hugetlb_page(vma))
+		return PAGE_SIZE;
+
+	hstate = hstate_vma(vma);
+
+	return 1UL << (hstate->order + PAGE_SHIFT);
+}
+
+/*
  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
  * bits of the reservation map pointer, which are always clear due to
  * alignment.