Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	60
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f8a97b9a35..9d5498e2d0f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -54,6 +54,7 @@
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
+#include <linux/prefetch.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -2317,6 +2318,21 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+{
+	if (addr) {
+		unsigned long alloc_end = addr + (PAGE_SIZE << order);
+		unsigned long used = addr + PAGE_ALIGN(size);
+
+		split_page(virt_to_page((void *)addr), order);
+		while (used < alloc_end) {
+			free_page(used);
+			used += PAGE_SIZE;
+		}
+	}
+	return (void *)addr;
+}
+
 /**
  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
  * @size: the number of bytes to allocate
@@ -2336,22 +2352,33 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 	unsigned long addr;
 
 	addr = __get_free_pages(gfp_mask, order);
-	if (addr) {
-		unsigned long alloc_end = addr + (PAGE_SIZE << order);
-		unsigned long used = addr + PAGE_ALIGN(size);
-
-		split_page(virt_to_page((void *)addr), order);
-		while (used < alloc_end) {
-			free_page(used);
-			used += PAGE_SIZE;
-		}
-	}
-
-	return (void *)addr;
+	return make_alloc_exact(addr, order, size);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
 
 /**
+ * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
+ *			   pages on a node.
+ * @nid: the preferred node ID where memory should be allocated
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but try to allocate on node nid first before
+ * falling back.
+ * Note this is not alloc_pages_exact_node(), which allocates on a specific
+ * node but is not exact.
+ */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+{
+	unsigned order = get_order(size);
+	struct page *p = alloc_pages_node(nid, gfp_mask, order);
+	if (!p)
+		return NULL;
+	return make_alloc_exact((unsigned long)page_address(p), order, size);
+}
+EXPORT_SYMBOL(alloc_pages_exact_nid);
+
+/**
  * free_pages_exact - release memory allocated via alloc_pages_exact()
  * @virt: the value returned by alloc_pages_exact.
  * @size: size of allocation, same value as passed to alloc_pages_exact().
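The factored-out make_alloc_exact() centralizes the trim-to-exact-size logic: it splits the high-order allocation into single pages and frees everything past PAGE_ALIGN(size). As a hedged illustration of how the new export would be used (not part of this patch; example_alloc() is a hypothetical caller):

	/*
	 * Illustrative sketch only. Requesting five pages rounds up to
	 * order 3 (eight pages) via get_order(); make_alloc_exact() then
	 * frees the three unused tail pages, so exactly five stay allocated.
	 */
	static void *example_alloc(int nid)
	{
		void *buf = alloc_pages_exact_nid(nid, 5 * PAGE_SIZE,
						  GFP_KERNEL);

		if (!buf)
			return NULL;	/* allocation failed on all nodes */
		/* ... use buf ... */
		return buf;	/* later: free_pages_exact(buf, 5 * PAGE_SIZE) */
	}
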
@@ -3514,7 +3541,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 		pcp->batch = PAGE_SHIFT * 8;
 }
 
-static __meminit void setup_zone_pageset(struct zone *zone)
+static void setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
 
@@ -3564,7 +3591,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 
 	if (!slab_is_available()) {
 		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node(pgdat, alloc_size);
+			alloc_bootmem_node_nopanic(pgdat, alloc_size);
 	} else {
 		/*
 		 * This case means that a zone whose size was 0 gets new memory
@@ -4141,7 +4168,8 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 	unsigned long usemapsize = usemap_size(zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
-		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
+		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+								   usemapsize);
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat,
@@ -4307,7 +4335,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		size =  (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
-			map = alloc_bootmem_node(pgdat, size);
+			map = alloc_bootmem_node_nopanic(pgdat, size);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifndef CONFIG_NEED_MULTIPLE_NODES
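The bootmem hunks swap alloc_bootmem_node() for alloc_bootmem_node_nopanic(), which changes failure semantics: the plain variant panics when the early allocator cannot satisfy the request, while the _nopanic variant returns NULL and leaves recovery to the caller. A minimal sketch of the caller pattern this enables (example_early_setup() is hypothetical, not from this patch):

	/*
	 * Illustrative sketch only: with the _nopanic variant the caller
	 * must check for NULL instead of relying on a boot-time panic.
	 */
	static void __init example_early_setup(struct pglist_data *pgdat,
					       unsigned long size)
	{
		void *p = alloc_bootmem_node_nopanic(pgdat, size);

		if (!p)
			return;	/* degrade gracefully rather than panic */
		/* ... initialize p ... */
	}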