Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          |  2
-rw-r--r--  arch/powerpc/kernel/entry_64.S        | 62
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  |  2
-rw-r--r--  arch/powerpc/kernel/idle.c            | 23
-rw-r--r--  arch/powerpc/kernel/init_task.c       | 29
-rw-r--r--  arch/powerpc/kernel/irq.c             | 21
-rw-r--r--  arch/powerpc/kernel/process.c         | 31
-rw-r--r--  arch/powerpc/kernel/ptrace.c          |  2
-rw-r--r--  arch/powerpc/kernel/smp.c             | 76
-rw-r--r--  arch/powerpc/kernel/traps.c           | 10

10 files changed, 68 insertions(+), 190 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f5808a35688..83afacd3ba7 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -28,7 +28,7 @@ endif
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
-				   init_task.o process.o systbl.o idle.o \
+				   process.o systbl.o idle.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o dma.o \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f..ef2074c3e90 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite)
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off

-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
-	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore

-3:	/*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.
@@ -626,7 +622,7 @@ restore_no_replay:
 	/*
 	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
@@ -700,6 +696,25 @@ fast_exception_return:
 #endif /* CONFIG_PPC_BOOK3E */

 	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
+	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
 	 */
@@ -748,6 +763,9 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
+
+
+3:
 do_work:
#ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
@@ -767,16 +785,6 @@ do_work:
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq

-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@ do_work:

 user_work:
 #endif /* CONFIG_PREEMPT */

-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
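Note: for readers tracing the assembly above, the rewritten restore path drops the old numeric 3:/4: labels in favour of do_restore/restore_irq_off, and adds the PACA_IRQ_HARD_DIS resync when returning to a context that is soft-disabled but hard-enabled. A rough C model of that control flow (hypothetical names; BookE details and the actual interrupt replay are omitted):

    #include <stdbool.h>

    #define PACA_IRQ_HARD_DIS 0x01    /* "hard disabled" bit in irq_happened */
    #define MSR_EE            0x8000  /* external interrupt enable in the MSR */

    struct paca_model {               /* stand-in for the real paca_struct */
        unsigned char soft_enabled;   /* PACASOFTIRQEN byte */
        unsigned char irq_happened;   /* PACAIRQHAPPENED byte */
    };

    /* Model of "restore": softe is the saved SOFTE(r1) of the context we
     * return to, msr its saved _MSR(r1). Returns true when the exit goes
     * straight to do_restore. */
    static bool restore_model(struct paca_model *paca,
                              unsigned long softe, unsigned long msr)
    {
        if (softe == 0) {
            /* restore_irq_off: returning soft-disabled. If the target
             * context is hard-enabled, clear PACA_IRQ_HARD_DIS so the
             * MSR and the book-keeping cannot get out of sync. */
            if (msr & MSR_EE)
                paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
            paca->soft_enabled = 0;
            return true;              /* b do_restore */
        }
        if (paca->soft_enabled == 1)
            return true;              /* already enabled: do_restore */
        /* Soft-enabling: pending irq_happened bits are replayed here. */
        paca->soft_enabled = 1;
        return false;
    }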
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb45..8f880bc77c5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 6d2209ac0c4..2099d9a879e 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -113,29 +113,6 @@ void cpu_idle(void)
 	}
 }
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
- * idle loop and start using the new idle loop.
- * Required while changing idle handler on SMP systems.
- * Caller must have changed idle handler to the new value before the call.
- * This window may be larger on shared systems.
- */
-void cpu_idle_wait(void)
-{
-	int cpu;
-	smp_mb();
-
-	/* kick all the CPUs so that they exit out of old idle routine */
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (cpu != smp_processor_id())
-			smp_send_reschedule(cpu);
-	}
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
 int powersave_nap;

 #ifdef CONFIG_SYSCTL
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
deleted file mode 100644
index d076d465dbd..00000000000
--- a/arch/powerpc/kernel/init_task.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-#include <asm/uaccess.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
-	{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 43eb74fcedd..641da9e868c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAG
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAG */
+
 	set_soft_enabled(0);

 	/*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * if they are currently disabled. This is typically called before
  * schedule() or do_signal() when returning to userspace. We do it
  * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
  */
 void restore_interrupts(void)
 {
-	if (irqs_disabled())
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 		local_irq_enable();
+	} else
+		__hard_irq_enable();
 }

 #endif /* CONFIG_PPC64 */
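Note: the irq.c hunks harden the same lazy-disable scheme the entry code implements: local_irq_disable() only clears the PACA soft-enable byte, a masked interrupt records itself in paca->irq_happened and hard-disables, and arch_local_irq_restore() replays whatever happened. A simplified, self-contained model of that bookkeeping (hypothetical helper names, not the kernel's exact code):

    #define PACA_IRQ_HARD_DIS 0x01  /* hard-disabled since a masked IRQ hit */

    static unsigned char soft_enabled = 1; /* models PACASOFTIRQEN */
    static unsigned char irq_happened;     /* models paca->irq_happened */

    static void replay_pending(void) { /* re-issue the queued interrupt(s) */ }

    static void model_irq_disable(void)
    {
        soft_enabled = 0;           /* cheap: no MSR access at all */
    }

    /* Called when hardware delivers an interrupt. */
    static void model_irq_arrives(unsigned char paca_irq_bit)
    {
        if (!soft_enabled) {
            /* Masked: note it, hard-disable, return immediately. */
            irq_happened |= paca_irq_bit | PACA_IRQ_HARD_DIS;
            return;
        }
        /* ...handle the interrupt normally... */
    }

    static void model_irq_enable(void)
    {
        if (irq_happened & ~PACA_IRQ_HARD_DIS)
            replay_pending();       /* what restore_check_irq_replay does */
        irq_happened = 0;
        soft_enabled = 1;           /* then hard-enable MSR[EE] */
    }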
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4937c969009..aa05935b694 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1252,37 +1252,6 @@ void __ppc64_runlatch_off(void)
 }
 #endif /* CONFIG_PPC64 */

-#if THREAD_SHIFT < PAGE_SHIFT
-
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 unsigned long arch_align_stack(unsigned long sp)
 {
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8d8e028893b..dd5e214cdf2 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1710,7 +1710,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
 	long ret = 0;

-	secure_computing(regs->gpr[0]);
+	secure_computing_strict(regs->gpr[0]);

 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs))
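Note: the one-line ptrace.c change tracks a seccomp API split from this same release cycle: architectures without seccomp-filter support call the void secure_computing_strict(), while filter-capable architectures keep the int-returning secure_computing(). Strict mode itself reduces to a fixed whitelist check; conceptually (a sketch with illustrative syscall numbers, not the kernel's exact table):

    #include <stdbool.h>
    #include <stddef.h>

    /* Sketch of seccomp mode 1: only read/write/exit/sigreturn are
     * allowed; any other syscall terminates the task with SIGKILL.
     * The numbers below are placeholders, not real __NR_* values. */
    static const int mode1_allowed[] = { 3 /* read */, 4 /* write */,
                                         1 /* exit */, 119 /* sigreturn */ };

    static bool mode1_syscall_ok(int nr)
    {
        for (size_t i = 0; i < sizeof(mode1_allowed) / sizeof(mode1_allowed[0]); i++)
            if (mode1_allowed[i] == nr)
                return true;
        return false;               /* caller kills the task here */
    }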
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d9f94410fd7..e4cb34322de 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,27 +57,9 @@
 #define DBG(fmt...)
 #endif

-
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
-
 /* State of each CPU during hotplug phases */
 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
 #endif

 struct thread_info *secondary_ti;
@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
 }
 #endif

-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 {
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
-static int __cpuinit create_idle(unsigned int cpu)
-{
-	struct thread_info *ti;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/* We can't use kernel_thread since we must avoid to
-	 * reschedule the child. We use a workqueue because
-	 * we want to fork from a kernel thread, not whatever
-	 * userspace process happens to be trying to online us.
-	 */
-	if (!c_idle.idle) {
-		schedule_work(&c_idle.work);
-		wait_for_completion(&c_idle.done);
-	} else
-		init_idle(c_idle.idle, cpu);
-	if (IS_ERR(c_idle.idle)) {
-		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
-		return PTR_ERR(c_idle.idle);
-	}
-	ti = task_thread_info(c_idle.idle);
+	struct thread_info *ti = task_thread_info(idle);

 #ifdef CONFIG_PPC64
-	paca[cpu].__current = c_idle.idle;
+	paca[cpu].__current = idle;
 	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 #endif
 	ti->cpu = cpu;
-	current_set[cpu] = ti;
-
-	return 0;
+	secondary_ti = current_set[cpu] = ti;
 }

-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc, c;

@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;

-	/* Make sure we have an idle thread */
-	rc = create_idle(cpu);
-	if (rc)
-		return rc;
-
-	secondary_ti = current_set[cpu];
+	cpu_idle_thread_init(cpu, tidle);

 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
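Note: the smp.c rewrite is the powerpc side of moving idle-thread management into common code: the core now forks one idle task per CPU early (kernel/smpboot.c) and hands it to the architecture through the new __cpu_up() signature, so the per-arch idle_thread_array and the fork-via-workqueue dance go away. The generic caller of this era looks roughly like this (abridged sketch; error paths and notifiers omitted):

    /* Abridged sketch of the generic CPU-up path (kernel/cpu.c of this
     * era): the pre-forked idle task is looked up and passed down, so
     * arch code no longer forks or caches idle threads itself. */
    static int _cpu_up_sketch(unsigned int cpu)
    {
        struct task_struct *idle;

        idle = idle_thread_get(cpu);    /* from kernel/smpboot.c */
        if (IS_ERR(idle))
            return PTR_ERR(idle);

        return __cpu_up(cpu, idle);     /* arch hook, new signature */
    }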
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e24..158972341a2 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 				   addr, regs->nip, regs->link, code);
 	}

-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();

 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}

-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();

 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
 {
 	int sig, code, fixed = 0;

+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
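Note: the pattern added to these handlers, "enable only if the interrupted context had interrupts on", keys off the soft-enable state saved in the exception frame rather than enabling unconditionally. On 64-bit the test is a one-liner on the saved SOFTE value; shown here for reference, as it is believed to read in asm/hw_irq.h of this era:

    /* An exception handler may only soft-enable if the context it
     * interrupted was itself soft-enabled. */
    static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
    {
        return !regs->softe;
    }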