Diffstat (limited to 'mm/hugetlb.c')

 -rw-r--r--  mm/hugetlb.c | 16
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index efe29b53daf..16a0f32c482 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -627,6 +627,8 @@ static void free_huge_page(struct page *page)
 	BUG_ON(page_mapcount(page));
 
 	spin_lock(&hugetlb_lock);
+	hugetlb_cgroup_uncharge_page(hstate_index(h),
+				     pages_per_huge_page(h), page);
 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
 		/* remove the page from active list */
 		list_del(&page->lru);
@@ -1115,7 +1117,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
 	long chg;
+	int ret, idx;
+	struct hugetlb_cgroup *h_cg;
 
+	idx = hstate_index(h);
 	/*
 	 * Processes that did not create the mapping will have no
 	 * reserves and will not have accounted against subpool
@@ -1131,6 +1136,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		if (hugepage_subpool_get_pages(spool, chg))
 			return ERR_PTR(-ENOSPC);
 
+	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
+	if (ret) {
+		hugepage_subpool_put_pages(spool, chg);
+		return ERR_PTR(-ENOSPC);
+	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
 	spin_unlock(&hugetlb_lock);
@@ -1138,6 +1148,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	if (!page) {
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
+			hugetlb_cgroup_uncharge_cgroup(idx,
+						       pages_per_huge_page(h),
+						       h_cg);
 			hugepage_subpool_put_pages(spool, chg);
 			return ERR_PTR(-ENOSPC);
 		}
@@ -1146,7 +1159,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-
+	/* update page cgroup details */
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
 
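The ordering the patch introduces in alloc_huge_page() is: charge the hugetlb cgroup before a huge page is dequeued or allocated, undo the charge (together with the subpool reservation) if no page can be obtained, and commit the charge to the page only once allocation has succeeded; free_huge_page() then uncharges the page when it is released. The stand-alone C sketch below models just that ordering in userspace. It is illustrative only: charge(), uncharge(), commit() and alloc_huge() are hypothetical stand-ins for hugetlb_cgroup_charge_cgroup(), hugetlb_cgroup_uncharge_cgroup(), hugetlb_cgroup_commit_charge() and the allocation path shown in the diff above, and the limit/page-size numbers are made up.

/*
 * Hypothetical userspace model of the charge -> allocate -> commit ordering
 * this patch adds to alloc_huge_page(); none of the names below are kernel
 * APIs, they are stand-ins used only to illustrate the error handling.
 */
#include <stdio.h>
#include <stdlib.h>

static long charged;				/* pages currently charged */
static long limit = 1024;			/* pretend cgroup limit */

static int charge(long nr_pages)		/* ~ hugetlb_cgroup_charge_cgroup() */
{
	if (charged + nr_pages > limit)
		return -1;			/* over limit: fail before allocating */
	charged += nr_pages;
	return 0;
}

static void uncharge(long nr_pages)		/* ~ hugetlb_cgroup_uncharge_cgroup() */
{
	charged -= nr_pages;
}

static void commit(long nr_pages, void *page)	/* ~ hugetlb_cgroup_commit_charge() */
{
	printf("committed %ld pages to %p\n", nr_pages, page);
}

static void *alloc_huge(long nr_pages)
{
	void *page;

	/* 1. charge the cgroup up front, before trying to get a page */
	if (charge(nr_pages))
		return NULL;

	/* 2. try to obtain the page; roll the charge back on failure */
	page = malloc(nr_pages * 4096);		/* stand-in for the dequeue/alloc paths */
	if (!page) {
		uncharge(nr_pages);
		return NULL;
	}

	/* 3. only once a page exists, bind the charge to it */
	commit(nr_pages, page);
	return page;
}

int main(void)
{
	void *p = alloc_huge(512);		/* 512 base pages ~ one 2MB huge page */

	if (p)
		printf("allocated, %ld pages charged\n", charged);
	free(p);
	return 0;
}

Charging before the page is obtained means a failed charge never has to strip accounting state off an already-allocated page, which keeps the new error paths symmetric with the existing hugepage_subpool_get_pages()/hugepage_subpool_put_pages() handling.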