diff options
Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
| -rw-r--r-- | arch/powerpc/kernel/setup_64.c | 24 | 
1 file changed, 10 insertions, 14 deletions
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index f3fb5a79de5..d135f93cb0f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -34,7 +34,7 @@  #include <linux/bootmem.h>  #include <linux/pci.h>  #include <linux/lockdep.h> -#include <linux/lmb.h> +#include <linux/memblock.h>  #include <asm/io.h>  #include <asm/kdump.h>  #include <asm/prom.h> @@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)   * the CPU that ignores the top 2 bits of the address in real   * mode so we can access kernel globals normally provided we   * only toy with things in the RMO region. From here, we do - * some early parsing of the device-tree to setup out LMB + * some early parsing of the device-tree to setup out MEMBLOCK   * data structures, and allocate & initialize the hash table   * and segment tables so we can start running with translation   * enabled. @@ -404,7 +404,7 @@ void __init setup_system(void)  	printk("-----------------------------------------------------\n");  	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size); -	printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size()); +	printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());  	if (ppc64_caches.dline_size != 0x80)  		printk("ppc64_caches.dcache_line_size = 0x%x\n",  		       ppc64_caches.dline_size); @@ -432,7 +432,6 @@ static u64 slb0_limit(void)  	return 1UL << SID_SHIFT;  } -#ifdef CONFIG_IRQSTACKS  static void __init irqstack_early_init(void)  {  	u64 limit = slb0_limit(); @@ -444,16 +443,13 @@ static void __init irqstack_early_init(void)  	 */  	for_each_possible_cpu(i) {  		softirq_ctx[i] = (struct thread_info *) -			__va(lmb_alloc_base(THREAD_SIZE, +			__va(memblock_alloc_base(THREAD_SIZE,  					    THREAD_SIZE, limit));  		hardirq_ctx[i] = (struct thread_info *) -			__va(lmb_alloc_base(THREAD_SIZE, +			__va(memblock_alloc_base(THREAD_SIZE,  					    THREAD_SIZE, limit));  	
}  } -#else -#define irqstack_early_init() -#endif  #ifdef CONFIG_PPC_BOOK3E  static void __init exc_lvl_early_init(void) @@ -462,11 +458,11 @@ static void __init exc_lvl_early_init(void)  	for_each_possible_cpu(i) {  		critirq_ctx[i] = (struct thread_info *) -			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));  		dbgirq_ctx[i] = (struct thread_info *) -			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));  		mcheckirq_ctx[i] = (struct thread_info *) -			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));  	}  }  #else @@ -491,11 +487,11 @@ static void __init emergency_stack_init(void)  	 * bringup, we need to get at them in real mode. This means they  	 * must also be within the RMO region.  	 */ -	limit = min(slb0_limit(), lmb.rmo_size); +	limit = min(slb0_limit(), memblock.rmo_size);  	for_each_possible_cpu(i) {  		unsigned long sp; -		sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); +		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);  		sp += THREAD_SIZE;  		paca[i].emergency_sp = __va(sp);  	}  |