Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--  arch/powerpc/mm/mem.c | 78
1 file changed, 39 insertions, 39 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f594d774bf..1a84a8d0000 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
-	for (i=0; i < lmb.memory.cnt; i++) {
+	for (i=0; i < memblock.memory.cnt; i++) {
 		unsigned long base;
 
-		base = lmb.memory.region[i].base;
+		base = memblock.memory.region[i].base;
 
 		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+			(paddr < (base + memblock.memory.region[i].size))) {
 			return 1;
 		}
 	}
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures.  Walk through the
+ * Instead it maintains it in memblock.memory structures.  Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct lmb_property res;
+	struct memblock_property res;
 	unsigned long pfn, len;
 	u64 end;
 	int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	res.size = (u64) nr_pages << PAGE_SHIFT;
 	end = res.base + res.size - 1;
 
-	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+	while ((res.base < end) && (memblock_find(&res) >= 0)) {
 		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
 		len = (unsigned long)(res.size >> PAGE_SHIFT);
 		ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
 	 */
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < lmb.memory.cnt; i++) {
+	for (i = 0; i < memblock.memory.cnt; i++) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long addr = lmb.reserved.region[i].base +
-				     lmb_size_bytes(&lmb.reserved, i) - 1;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long addr = memblock.reserved.region[i].base +
+				     memblock_size_bytes(&memblock.reserved, i) - 1;
 		if (addr < lowmem_end_addr)
-			reserve_bootmem(lmb.reserved.region[i].base,
-					lmb_size_bytes(&lmb.reserved, i),
+			reserve_bootmem(memblock.reserved.region[i].base,
+					memblock_size_bytes(&memblock.reserved, i),
 					BOOTMEM_DEFAULT);
-		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
 			unsigned long adjusted_size = lowmem_end_addr -
-				      lmb.reserved.region[i].base;
-			reserve_bootmem(lmb.reserved.region[i].base,
+				      memblock.reserved.region[i].base;
+			reserve_bootmem(memblock.reserved.region[i].base,
 					adjusted_size, BOOTMEM_DEFAULT);
 		}
 	}
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
+	for (i = 0; i < memblock.reserved.cnt; i++)
+		reserve_bootmem(memblock.reserved.region[i].base,
+				memblock_size_bytes(&memblock.reserved, i),
 				BOOTMEM_DEFAULT);
 #endif
 
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long lmb_next_region_start_pfn,
-		      lmb_region_max_pfn;
+	unsigned long memblock_next_region_start_pfn,
+		      memblock_region_max_pfn;
 	int i;
 
-	for (i = 0; i < lmb.memory.cnt - 1; i++) {
-		lmb_region_max_pfn =
-			(lmb.memory.region[i].base >> PAGE_SHIFT) +
-			(lmb.memory.region[i].size >> PAGE_SHIFT);
-		lmb_next_region_start_pfn =
-			lmb.memory.region[i+1].base >> PAGE_SHIFT;
+	for (i = 0; i < memblock.memory.cnt - 1; i++) {
+		memblock_region_max_pfn =
+			(memblock.memory.region[i].base >> PAGE_SHIFT) +
+			(memblock.memory.region[i].size >> PAGE_SHIFT);
+		memblock_next_region_start_pfn =
+			memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-			register_nosave_region(lmb_region_max_pfn,
-					       lmb_next_region_start_pfn);
+		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+			register_nosave_region(memblock_region_max_pfn,
+					       memblock_next_region_start_pfn);
 	}
 
 	return 0;
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-	unsigned long total_ram = lmb_phys_mem_size();
-	phys_addr_t top_of_ram = lmb_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
+	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
 		swiotlb_init(1);
 #endif
 
-	num_physpages = lmb.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
 		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
-			if (lmb_is_reserved(pfn << PAGE_SHIFT))
+			if (memblock_is_reserved(pfn << PAGE_SHIFT))
 				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
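For context, the rename is purely mechanical: each lmb symbol gets a memblock name but keeps its shape, so the open-coded region walks above still read memblock.memory.cnt and memblock.memory.region[] directly. The sketch below is not part of this patch; paddr_is_ram() is a hypothetical helper showing that post-rename pattern as page_is_ram() uses it, and it assumes the 2010-era memblock layout from this series (later kernels hide these fields behind iterator macros).

/*
 * Minimal sketch, assuming the memblock API as it exists in this patch:
 * memblock.memory.cnt and memblock.memory.region[] are accessed directly,
 * exactly as page_is_ram() does above.  Not valid on newer kernels, which
 * replace this open-coded walk with iterator helpers.
 */
#include <linux/memblock.h>

static int paddr_is_ram(unsigned long paddr)
{
	int i;

	for (i = 0; i < memblock.memory.cnt; i++) {
		unsigned long base = memblock.memory.region[i].base;
		unsigned long size = memblock.memory.region[i].size;

		/* same per-region containment test page_is_ram() performs */
		if (paddr >= base && paddr < base + size)
			return 1;
	}
	return 0;
}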