-rw-r--r--  include/linux/huge_mm.h   |  8 ++++++++
-rw-r--r--  include/linux/mm_inline.h |  8 +++++---
-rw-r--r--  mm/huge_memory.c          | 10 ++++++++++
-rw-r--r--  mm/memcontrol.c           |  2 +-
-rw-r--r--  mm/vmscan.c               | 11 ++++++-----
5 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 82759522873..9b48c24df26 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -117,11 +117,19 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 		return;
 	__vma_adjust_trans_huge(vma, start, end, adjust_next);
 }
+static inline int hpage_nr_pages(struct page *page)
+{
+	if (unlikely(PageTransHuge(page)))
+		return HPAGE_PMD_NR;
+	return 1;
+}
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
 #define HPAGE_PMD_SIZE ({ BUG(); 0; })
+#define hpage_nr_pages(x) 1
+
 #define transparent_hugepage_enabled(__vma) 0
 
 #define transparent_hugepage_flags 0UL
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 650f31eabdb..8f7d24712dc 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,6 +1,8 @@
 #ifndef LINUX_MM_INLINE_H
 #define LINUX_MM_INLINE_H
 
+#include <linux/huge_mm.h>
+
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -24,7 +26,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
 		       struct list_head *head)
 {
 	list_add(&page->lru, head);
-	__inc_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 	mem_cgroup_add_lru_list(page, l);
 }
 
@@ -38,7 +40,7 @@ static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
@@ -73,7 +75,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
 			l += LRU_ACTIVE;
 		}
 	}
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 892d8a17a7e..f4f6041176a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1143,6 +1143,7 @@ static void __split_huge_page_refcount(struct page *page)
 	int i;
 	unsigned long head_index = page->index;
 	struct zone *zone = page_zone(page);
+	int zonestat;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,6 +1208,15 @@ static void __split_huge_page_refcount(struct page *page)
 	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
+	/*
+	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
+	 * so adjust those appropriately if this page is on the LRU.
+	 */
+	if (PageLRU(page)) {
+		zonestat = NR_LRU_BASE + page_lru(page);
+		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+	}
+
 	ClearPageCompound(page);
 	compound_unlock(page);
 	spin_unlock_irq(&zone->lru_lock);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a1bb59d4c9d..f4ea3410fb4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1091,7 +1091,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 		case -EBUSY:
 			/* we don't affect global LRU but rotate in our LRU */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5b762ae23a..0882014d2ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1045,7 +1045,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
@@ -1103,7 +1103,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
-				nr_taken++;
+				nr_taken += hpage_nr_pages(page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
 					nr_lumpy_dirty++;
@@ -1158,14 +1158,15 @@ static unsigned long clear_active_flags(struct list_head *page_list,
 	struct page *page;
 
 	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
 		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			lru += LRU_ACTIVE;
 			ClearPageActive(page);
-			nr_active++;
+			nr_active += numpages;
 		}
 		if (count)
-			count[lru]++;
+			count[lru] += numpages;
 	}
 
 	return nr_active;
@@ -1483,7 +1484,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 
 		list_move(&page->lru, &zone->lru[lru].list);
 		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
+		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
 			spin_unlock_irq(&zone->lru_lock);
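The accounting rule behind these hunks is that every LRU and isolation counter is kept in base pages, so a transparent hugepage must contribute hpage_nr_pages() == HPAGE_PMD_NR rather than 1, and a split must give back the HPAGE_PMD_NR - 1 over-count on the head page's LRU while the tail pages are re-added one at a time. The standalone C sketch below models only that arithmetic; it is not kernel code, the toy_* names are invented for illustration, and HPAGE_PMD_NR = 512 assumes the usual 2 MB huge page over 4 kB base pages.

/*
 * Toy userspace model of the accounting in this patch (not kernel code):
 * an LRU counter tracks base pages, so a THP contributes HPAGE_PMD_NR,
 * and splitting one must subtract HPAGE_PMD_NR - 1 from the head page's
 * LRU counter while the tails are added back individually.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumes 2 MB huge page / 4 kB base page */

struct toy_page {
	bool on_lru;
	bool trans_huge;	/* stands in for PageTransHuge() */
};

static long nr_lru_pages;	/* stands in for an NR_LRU_BASE + lru counter */

/* mirrors hpage_nr_pages(): a THP counts as HPAGE_PMD_NR base pages */
static int toy_nr_pages(const struct toy_page *page)
{
	return page->trans_huge ? HPAGE_PMD_NR : 1;
}

static void toy_add_to_lru(struct toy_page *page)
{
	page->on_lru = true;
	nr_lru_pages += toy_nr_pages(page);	/* was a bare "++" before the patch */
}

static void toy_del_from_lru(struct toy_page *page)
{
	page->on_lru = false;
	nr_lru_pages -= toy_nr_pages(page);
}

/*
 * Mirrors the __split_huge_page_refcount() hunk: the head keeps counting
 * as 1 page, the tails are added one by one, so the HPAGE_PMD_NR - 1
 * over-count is removed up front if the head sits on an LRU list.
 */
static void toy_split_huge_page(struct toy_page *head,
				struct toy_page tails[HPAGE_PMD_NR - 1])
{
	if (head->on_lru)
		nr_lru_pages -= HPAGE_PMD_NR - 1;
	head->trans_huge = false;
	for (int i = 0; i < HPAGE_PMD_NR - 1; i++) {
		tails[i].trans_huge = false;
		toy_add_to_lru(&tails[i]);
	}
}

int main(void)
{
	struct toy_page head = { .trans_huge = true };
	struct toy_page tails[HPAGE_PMD_NR - 1] = { 0 };

	toy_add_to_lru(&head);
	assert(nr_lru_pages == HPAGE_PMD_NR);

	toy_split_huge_page(&head, tails);
	assert(nr_lru_pages == HPAGE_PMD_NR);	/* still 512 base pages on the LRU */

	toy_del_from_lru(&head);
	assert(nr_lru_pages == HPAGE_PMD_NR - 1);

	printf("LRU base-page count stays consistent: %ld\n", nr_lru_pages);
	return 0;
}

Built with a plain cc and run, the asserts hold: the toy counter reads 512 base pages both before and after the split, which is the invariant the patch restores for the real NR_LRU_* counters and for nr_taken in the isolation paths above.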