| author | Grant Likely <grant.likely@secretlab.ca> | 2012-12-07 17:02:47 +0000 | 
|---|---|---|
| committer | Grant Likely <grant.likely@secretlab.ca> | 2012-12-07 17:02:47 +0000 | 
| commit | 7730cba2a50332c194f50a58b86359ea39a82bd1 (patch) | |
| tree | 64c8d7228da1454c02288068e57a9c61f8b0acd0 /mm/page_alloc.c | |
| parent | c20151dff8a6d503c0d0cc4387c33a618cdabcb7 (diff) | |
| parent | b69f0859dc8e633c5d8c06845811588fe17e68b3 (diff) | |
Merge tag 'v3.7-rc8' into spi/next
Linux 3.7-rc8
Diffstat (limited to 'mm/page_alloc.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/page_alloc.c | 77 |

1 file changed, 30 insertions, 47 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6702e..a8f2c87792c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	mt = get_pageblock_migratetype(page);
 	if (unlikely(mt != MIGRATE_ISOLATE))
-		__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
 	if (alloc_order != order)
 		expand(zone, page, alloc_order, order,
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
 
 /*
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,7 +2425,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
@@ -2487,15 +2498,21 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -4505,7 +4522,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		lruvec_init(&zone->lruvec, zone);
+		lruvec_init(&zone->lruvec);
 		if (!size)
 			continue;
@@ -6098,37 +6115,3 @@ void dump_page(struct page *page)
 	dump_page_flags(page->flags);
 	mem_cgroup_print_bad_page(page);
 }
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
-	struct zone *z;
-	int i, nid;
-
-	for_each_node_state(nid, N_HIGH_MEMORY) {
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			z = NODE_DATA(nid)->node_zones + i;
-			z->present_pages = 0;
-		}
-	}
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-				unsigned long end_pfn)
-{
-	struct zone *z;
-	unsigned long zone_start_pfn, zone_end_pfn;
-	int i;
-
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		z = NODE_DATA(nid)->node_zones + i;
-		zone_start_pfn = z->zone_start_pfn;
-		zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
-		/* if the two regions intersect */
-		if (!(zone_start_pfn >= end_pfn	|| zone_end_pfn <= start_pfn))
-			z->present_pages += min(end_pfn, zone_end_pfn) -
-					    max(start_pfn, zone_start_pfn);
-	}
-}
```
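The first two hunks correct the free-page accounting in capture_free_page(): the function may find a free page of order `order` but hands only `1UL << alloc_order` pages to the caller, and expand() leaves the split-off remainder on the free lists, where it is still counted as free. Subtracting and returning `1UL << order` therefore overstated what was captured. A minimal user-space sketch of the arithmetic, with hypothetical orders chosen purely for illustration:

```c
#include <stdio.h>

int main(void)
{
	unsigned int order = 5;       /* hypothetical: order of the free page found */
	unsigned int alloc_order = 3; /* hypothetical: order the caller requested */

	/* Before the fix: the whole order-5 block (32 pages) was
	 * subtracted from the free counters and reported as captured,
	 * even though expand() keeps the unused 24 pages on the free
	 * lists, where they are still counted as free. */
	printf("before fix: %lu pages accounted\n", 1UL << order);       /* 32 */

	/* After the fix: only the pages that actually leave the free
	 * lists (8) are subtracted and reported. */
	printf("after  fix: %lu pages accounted\n", 1UL << alloc_order); /*  8 */
	return 0;
}
```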
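The larger change introduces is_thp_alloc() so that a likely transparent-huge-page fault no longer wakes kswapd unconditionally at `restart:`; kswapd is woken only once the caller reaches the reclaim/compaction path. The sketch below mirrors the predicate outside the kernel; the flag bit values and the pageblock order are assumptions (plausible v3.7-era x86-64 values), not taken from the patch:

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_MOVABLE   ((gfp_t)0x08u)  /* assumed bit value */
#define __GFP_REPEAT    ((gfp_t)0x400u) /* assumed bit value */
#define PAGEBLOCK_ORDER 9               /* assumed: 2MB pageblocks / 4KB pages */

/* Mirrors the predicate from the patch: a pageblock-sized, movable
 * allocation that did not ask to retry (__GFP_REPEAT) is taken to
 * be a THP fault. */
static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
{
	return order == PAGEBLOCK_ORDER &&
	       (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
}

int main(void)
{
	/* THP-style fault: movable, pageblock order, no __GFP_REPEAT */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE, PAGEBLOCK_ORDER));      /* 1 */
	/* hugetlbfs-style allocation: sets __GFP_REPEAT, so not THP */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE | __GFP_REPEAT,
				    PAGEBLOCK_ORDER));                     /* 0 */
	/* order-0 movable allocation: not THP */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE, 0));                    /* 0 */
	return 0;
}
```

Because a THP fault can fall back to base pages, failing fast when compaction is deferred or contended is cheaper than waking kswapd and entering direct reclaim for every such allocation.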