diff options
| author | Tang Chen <tangchen@cn.fujitsu.com> | 2013-02-22 16:33:39 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 17:50:14 -0800 | 
| commit | 6981ec31146cf19454c55c130625f6cee89aab95 (patch) | |
| tree | c7f54f81d45c499103268ab95b88d0d617be9c59 /mm/page_alloc.c | |
| parent | 34b71f1e04fcba578e719e675b4882eeeb2a1f6f (diff) | |
| download | olio-linux-3.10-6981ec31146cf19454c55c130625f6cee89aab95.tar.xz olio-linux-3.10-6981ec31146cf19454c55c130625f6cee89aab95.zip  | |
page_alloc: introduce zone_movable_limit[] to keep movable limit for nodes
Introduce a new array zone_movable_limit[] to store the ZONE_MOVABLE
limit from movablemem_map boot option for all nodes.  The function
sanitize_zone_movable_limit() will find out to which node the ranges in
movablemem_map.map[] belong, and calculate the low boundary of
ZONE_MOVABLE for each node.
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Liu Jiang <jiang.liu@huawei.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 76 | 
1 files changed, 74 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index aa1cc5fe990..0f267d9c73f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -210,6 +210,7 @@ static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];  static unsigned long __initdata required_kernelcore;  static unsigned long __initdata required_movablecore;  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; +static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */  int movable_zone; @@ -4375,6 +4376,77 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,  	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);  } +/** + * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array. + * + * zone_movable_limit is initialized as 0. This function will try to get + * the first ZONE_MOVABLE pfn of each node from movablemem_map, and + * assigne them to zone_movable_limit. + * zone_movable_limit[nid] == 0 means no limit for the node. + * + * Note: Each range is represented as [start_pfn, end_pfn) + */ +static void __meminit sanitize_zone_movable_limit(void) +{ +	int map_pos = 0, i, nid; +	unsigned long start_pfn, end_pfn; + +	if (!movablemem_map.nr_map) +		return; + +	/* Iterate all ranges from minimum to maximum */ +	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { +		/* +		 * If we have found lowest pfn of ZONE_MOVABLE of the node +		 * specified by user, just go on to check next range. +		 */ +		if (zone_movable_limit[nid]) +			continue; + +#ifdef CONFIG_ZONE_DMA +		/* Skip DMA memory. */ +		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA]) +			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA]; +#endif + +#ifdef CONFIG_ZONE_DMA32 +		/* Skip DMA32 memory. 
*/ +		if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32]) +			start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32]; +#endif + +#ifdef CONFIG_HIGHMEM +		/* Skip lowmem if ZONE_MOVABLE is highmem. */ +		if (zone_movable_is_highmem() && +		    start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]) +			start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]; +#endif + +		if (start_pfn >= end_pfn) +			continue; + +		while (map_pos < movablemem_map.nr_map) { +			if (end_pfn <= movablemem_map.map[map_pos].start_pfn) +				break; + +			if (start_pfn >= movablemem_map.map[map_pos].end_pfn) { +				map_pos++; +				continue; +			} + +			/* +			 * The start_pfn of ZONE_MOVABLE is either the minimum +			 * pfn specified by movablemem_map, or 0, which means +			 * the node has no ZONE_MOVABLE. +			 */ +			zone_movable_limit[nid] = max(start_pfn, +					movablemem_map.map[map_pos].start_pfn); + +			break; +		} +	} +} +  #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */  static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,  					unsigned long zone_type, @@ -4392,7 +4464,6 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,  	return zholes_size[zone_type];  } -  #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */  static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, @@ -4839,7 +4910,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)  		goto out;  	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ -	find_usable_zone_for_movable();  	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];  restart: @@ -4998,6 +5068,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)  	/* Find the PFNs that ZONE_MOVABLE begins at in each node */  	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); +	find_usable_zone_for_movable(); +	sanitize_zone_movable_limit();  	find_zone_movable_pfns_for_nodes();  	/* Print out the zone ranges */  |