diff options
| author | Tang Chen <tangchen@cn.fujitsu.com> | 2013-02-22 16:33:40 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 17:50:14 -0800 | 
| commit | 42f47e27e761fee07da69e04612ec7dd0d490edd (patch) | |
| tree | 15cab008a1759590cd50628b760ffc403f0f3dfc | |
| parent | 6981ec31146cf19454c55c130625f6cee89aab95 (diff) | |
| download | olio-linux-3.10-42f47e27e761fee07da69e04612ec7dd0d490edd.tar.xz olio-linux-3.10-42f47e27e761fee07da69e04612ec7dd0d490edd.zip  | |
page_alloc: make movablemem_map have higher priority
If kernelcore or movablecore is specified at the same time as
movablemem_map, movablemem_map will have the higher priority to be
satisfied.  This patch makes find_zone_movable_pfns_for_nodes()
calculate zone_movable_pfn[] with the limit from zone_movable_limit[].
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tested-by: Lin Feng <linfeng@cn.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | mm/page_alloc.c | 28 | 
1 file changed, 25 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0f267d9c73f..88b9962c99b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4905,9 +4905,17 @@ static void __init find_zone_movable_pfns_for_nodes(void)  		required_kernelcore = max(required_kernelcore, corepages);  	} -	/* If kernelcore was not specified, there is no ZONE_MOVABLE */ -	if (!required_kernelcore) +	/* +	 * If neither kernelcore/movablecore nor movablemem_map is specified, +	 * there is no ZONE_MOVABLE. But if movablemem_map is specified, the +	 * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[]. +	 */ +	if (!required_kernelcore) { +		if (movablemem_map.nr_map) +			memcpy(zone_movable_pfn, zone_movable_limit, +				sizeof(zone_movable_pfn));  		goto out; +	}  	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */  	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; @@ -4937,10 +4945,24 @@ restart:  		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {  			unsigned long size_pages; +			/* +			 * Find more memory for kernelcore in +			 * [zone_movable_pfn[nid], zone_movable_limit[nid]). 
+			 */  			start_pfn = max(start_pfn, zone_movable_pfn[nid]);  			if (start_pfn >= end_pfn)  				continue; +			if (zone_movable_limit[nid]) { +				end_pfn = min(end_pfn, zone_movable_limit[nid]); +				/* No range left for kernelcore in this node */ +				if (start_pfn >= end_pfn) { +					zone_movable_pfn[nid] = +							zone_movable_limit[nid]; +					break; +				} +			} +  			/* Account for what is only usable for kernelcore */  			if (start_pfn < usable_startpfn) {  				unsigned long kernel_pages; @@ -5000,12 +5022,12 @@ restart:  	if (usable_nodes && required_kernelcore > usable_nodes)  		goto restart; +out:  	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */  	for (nid = 0; nid < MAX_NUMNODES; nid++)  		zone_movable_pfn[nid] =  			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); -out:  	/* restore the node_state */  	node_states[N_MEMORY] = saved_node_state;  }  |