diff options
Diffstat (limited to 'arch/x86/mm/numa_emulation.c')
| -rw-r--r-- | arch/x86/mm/numa_emulation.c | 36 | 
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index d0ed086b624..46db56845f1 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)  	return -ENOENT;  } +static u64 mem_hole_size(u64 start, u64 end) +{ +	unsigned long start_pfn = PFN_UP(start); +	unsigned long end_pfn = PFN_DOWN(end); + +	if (start_pfn < end_pfn) +		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn)); +	return 0; +} +  /*   * Sets up nid to range from @start to @end.  The return value is -errno if   * something went wrong, 0 otherwise. @@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,  	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do  	 * the division in ulong number of pages and convert back.  	 */ -	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr); +	size = max_addr - addr - mem_hole_size(addr, max_addr);  	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);  	/* @@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,  			 * Continue to add memory to this fake node if its  			 * non-reserved memory is less than the per-node size.  			 */ -			while (end - start - -			       memblock_x86_hole_size(start, end) < size) { +			while (end - start - mem_hole_size(start, end) < size) {  				end += FAKE_NODE_MIN_SIZE;  				if (end > limit) {  					end = limit; @@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,  			 * this one must extend to the boundary.  			 
*/  			if (end < dma32_end && dma32_end - end - -			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) +			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)  				end = dma32_end;  			/* @@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,  			 * next node, this one must extend to the end of the  			 * physical node.  			 */ -			if (limit - end - -			    memblock_x86_hole_size(end, limit) < size) +			if (limit - end - mem_hole_size(end, limit) < size)  				end = limit;  			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, @@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)  {  	u64 end = start + size; -	while (end - start - memblock_x86_hole_size(start, end) < size) { +	while (end - start - mem_hole_size(start, end) < size) {  		end += FAKE_NODE_MIN_SIZE;  		if (end > max_addr) {  			end = max_addr; @@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,  	 * creates a uniform distribution of node sizes across the entire  	 * machine (but not necessarily over physical nodes).  	 */ -	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / -						MAX_NUMNODES; +	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;  	min_size = max(min_size, FAKE_NODE_MIN_SIZE);  	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)  		min_size = (min_size + FAKE_NODE_MIN_SIZE) & @@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,  			 * this one must extend to the boundary.  			 */  			if (end < dma32_end && dma32_end - end - -			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) +			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)  				end = dma32_end;  			/* @@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,  			 * next node, this one must extend to the end of the  			 * physical node.  			 
*/ -			if (limit - end - -			    memblock_x86_hole_size(end, limit) < size) +			if (limit - end - mem_hole_size(end, limit) < size)  				end = limit;  			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, @@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)  		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),  					      phys_size, PAGE_SIZE); -		if (phys == MEMBLOCK_ERROR) { +		if (!phys) {  			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");  			goto no_emu;  		} -		memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); +		memblock_reserve(phys, phys_size);  		phys_dist = __va(phys);  		for (i = 0; i < numa_dist_cnt; i++) @@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)  	/* free the copied physical distance table */  	if (phys_dist) -		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); +		memblock_free(__pa(phys_dist), phys_size);  	return;  no_emu:  |