Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 009ac285fea..c13ea753889 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -584,7 +584,7 @@ static inline void __free_one_page(struct page *page,
 		combined_idx = buddy_idx & page_idx;
 		higher_page = page + (combined_idx - page_idx);
 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
-		higher_buddy = page + (buddy_idx - combined_idx);
+		higher_buddy = higher_page + (buddy_idx - combined_idx);
 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
@@ -1928,6 +1928,17 @@ this_zone_full:
 		zlc_active = 0;
 		goto zonelist_scan;
 	}
+
+	if (page)
+		/*
+		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+		 * necessary to allocate the page. The expectation is
+		 * that the caller is taking steps that will free more
+		 * memory. The caller should avoid the page being used
+		 * for !PFMEMALLOC purposes.
+		 */
+		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
 	return page;
 }
@@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	struct page *page;
@@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask, sync_migration);
+						nodemask, sync_migration,
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
 	if (*did_some_progress != COMPACT_SKIPPED) {
@@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int migratetype, bool sync_migration,
-	bool *deferred_compaction,
+	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
 	return NULL;
@@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	bool sync_migration = false;
 	bool deferred_compaction = false;
+	bool contended_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2389,14 +2402,6 @@ rebalance:
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
 		if (page) {
-			/*
-			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-			 * necessary to allocate the page. The expectation is
-			 * that the caller is taking steps that will free more
-			 * memory. The caller should avoid the page being used
-			 * for !PFMEMALLOC purposes.
-			 */
-			page->pfmemalloc = true;
 			goto got_pg;
 		}
 	}
@@ -2422,6 +2427,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2431,10 +2437,11 @@ rebalance:
 	/*
 	 * If compaction is deferred for high-order allocations, it is because
 	 * sync compaction recently failed. In this is the case and the caller
-	 * has requested the system not be heavily disrupted, fail the
-	 * allocation now instead of entering direct reclaim
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
 	 */
-	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
@@ -2505,6 +2512,7 @@ rebalance:
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 		if (page)
@@ -2569,8 +2577,6 @@ retry_cpuset:
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	else
-		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
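As context for the one-line __free_one_page() fix in the first hunk: a buddy index at a given order is obtained by flipping bit 'order' of the page index, so once 'page' and its buddy have merged into the block starting at combined_idx, the next-order buddy offset is only meaningful relative to higher_page (the first page of the merged block), not the original 'page'. Below is a minimal userspace sketch of the index arithmetic, not kernel code; find_buddy_index() mirrors the kernel's __find_buddy_index(), and the concrete indices are hypothetical.

#include <stdio.h>

/* Mirrors the kernel's __find_buddy_index(): at a given order, a
 * block's buddy differs from it only in bit 'order' of the index. */
static unsigned long find_buddy_index(unsigned long page_idx,
				      unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned int order = 2;		/* merging two order-2 blocks */
	unsigned long page_idx = 12;	/* hypothetical index of the freed block */
	unsigned long buddy_idx = find_buddy_index(page_idx, order);	/* 8 */

	/* The merged order-3 block starts at the lower of the two indices. */
	unsigned long combined_idx = buddy_idx & page_idx;		/* 8 */

	/* Index of the merged block's own buddy at the next order. */
	unsigned long higher_buddy_idx =
			find_buddy_index(combined_idx, order + 1);	/* 0 */

	/*
	 * In the kernel, higher_page = page + (combined_idx - page_idx).
	 * The buggy line added (higher_buddy_idx - combined_idx) to 'page'
	 * instead of 'higher_page', landing on index 4 here rather than 0,
	 * so page_is_buddy() was checked against the wrong struct page.
	 */
	printf("combined=%lu, next-order buddy=%lu, offset from combined=%ld\n",
	       combined_idx, higher_buddy_idx,
	       (long)higher_buddy_idx - (long)combined_idx);
	return 0;
}

As the surrounding hunk shows, page_is_buddy(higher_page, higher_buddy, order + 1) only decides whether the freed block is queued at the tail of its free list in anticipation of an imminent next-order merge, so the wrong pointer defeated that placement heuristic rather than corrupting the free lists.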