Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 45
1 file changed, 25 insertions, 20 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0f2cdf9d8b..5cc986eb9f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -882,7 +882,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype)
+			int migratetype, int cold)
 {
 	int i;
 
@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		list_add(&page->lru, list);
+		if (likely(cold == 0))
+			list_add(&page->lru, list);
+		else
+			list_add_tail(&page->lru, list);
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
@@ -1119,7 +1122,8 @@ again:
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
@@ -1138,7 +1142,8 @@ again:
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
 
@@ -1666,7 +1671,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 			preferred_zone, migratetype);
 
 		if (!page && gfp_mask & __GFP_NOFAIL)
-			congestion_wait(WRITE, HZ/50);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (!page && (gfp_mask & __GFP_NOFAIL));
 
 	return page;
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * be using allocators in order of preference for an area that is
 	 * too large.
 	 */
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
+	if (order >= MAX_ORDER) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
 		return NULL;
+	}
 
 	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1789,6 +1796,10 @@ rebalance:
 	if (p->flags & PF_MEMALLOC)
 		goto nopage;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -1831,7 +1842,7 @@ rebalance:
 	pages_reclaimed += did_some_progress;
 	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
 		/* Wait for some write requests to complete then retry */
-		congestion_wait(WRITE, HZ/50);
+		congestion_wait(BLK_RW_ASYNC, HZ/50);
 		goto rebalance;
 	}
 
@@ -1983,7 +1994,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
 		unsigned long used = addr + PAGE_ALIGN(size);
 
-		split_page(virt_to_page(addr), order);
+		split_page(virt_to_page((void *)addr), order);
 		while (used < alloc_end) {
 			free_page(used);
 			used += PAGE_SIZE;
@@ -2533,7 +2544,6 @@ static void build_zonelists(pg_data_t *pgdat)
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
-	memset(node_load, 0, sizeof(node_load));
 	memset(node_order, 0, sizeof(node_order));
 	j = 0;
 
@@ -2642,6 +2652,9 @@ static int __build_all_zonelists(void *dummy)
 {
 	int nid;
 
+#ifdef CONFIG_NUMA
+	memset(node_load, 0, sizeof(node_load));
+#endif
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -4745,8 +4758,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 			 * some pages at the end of hash table which
 			 * alloc_pages_exact() automatically does
 			 */
-			if (get_order(size) < MAX_ORDER)
+			if (get_order(size) < MAX_ORDER) {
 				table = alloc_pages_exact(size, GFP_ATOMIC);
+				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+			}
 		}
 	} while (!table && size > PAGE_SIZE && --log2qty);
 
@@ -4764,16 +4779,6 @@ void *__init alloc_large_system_hash(const char *tablename,
 	if (_hash_mask)
 		*_hash_mask = (1 << log2qty) - 1;
 
-	/*
-	 * If hashdist is set, the table allocation is done with __vmalloc()
-	 * which invokes the kmemleak_alloc() callback. This function may also
-	 * be called before the slab and kmemleak are initialised when
-	 * kmemleak simply buffers the request to be executed later
-	 * (GFP_ATOMIC flag ignored in this case).
-	 */
-	if (!hashdist)
-		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
 	return table;
 }