Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |    2
-rw-r--r--  arch/arm/kernel/calls.S         |    2
-rw-r--r--  arch/arm/kernel/elf.c           |    9
-rw-r--r--  arch/arm/kernel/entry-armv.S    |    8
-rw-r--r--  arch/arm/kernel/entry-common.S  |    3
-rw-r--r--  arch/arm/kernel/init_task.c     |    4
-rw-r--r--  arch/arm/kernel/irq.c           |    6
-rw-r--r--  arch/arm/kernel/process.c       |   79
-rw-r--r--  arch/arm/kernel/signal.c        |   14
-rw-r--r--  arch/arm/kernel/smp.c           |  131
-rw-r--r--  arch/arm/kernel/smp_scu.c       |   48
-rw-r--r--  arch/arm/kernel/smp_twd.c       |  175
-rw-r--r--  arch/arm/kernel/unwind.c        |   19
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |   10
14 files changed, 419 insertions(+), 91 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 11a5197a221..ff89d0b3abc 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
+obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e9c83..f776e72a4cb 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
 /* 360 */	CALL(sys_inotify_init1)
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
+		CALL(sys_rt_tgsigqueueinfo)
+		CALL(sys_perf_counter_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f..950391f194c 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,6 +78,15 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
 		return 1;
 	if (cpu_architecture() < CPU_ARCH_ARMv6)
 		return 1;
+#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+	/*
+	 * If we have support for OABI programs, we can never allow NX
+	 * support - our signal syscall restart mechanism relies upon
+	 * being able to execute code placed on the user stack.
+	 */
+	return 1;
+#else
 	return 0;
+#endif
 }
 EXPORT_SYMBOL(arm_elf_read_implies_exec);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d662a2f1fd8..fc8af43c500 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -482,6 +482,9 @@ __und_usr:
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r0, r0				@ little endian instruction
+#endif
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
@@ -815,10 +818,7 @@ __kuser_helper_start:
  */

 __kuser_memory_barrier:				@ 0xffff0fa0
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
-#endif
+	smp_dmb
 	usr_ret	lr

 	.align	5
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b55cb033180..366e5097a41 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -210,6 +210,9 @@ ENTRY(vector_swi)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
 #endif
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r10, r10			@ little endian instruction
+#endif

 #elif defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index e859af34946..3f470866bb8 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -14,10 +14,6 @@
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
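The elf.c hunk above gates NX (non-executable stack) support on the binary and the kernel configuration: binaries without a PT_GNU_STACK header, pre-ARMv6 CPUs, and any kernel able to run OABI programs all force read-implies-exec, because OABI signal restart executes code placed on the user stack. As a stand-alone illustration, the decision reduces to the C sketch below; the kernel makes the OABI test at compile time via CONFIG_AEABI/CONFIG_OABI_COMPAT, which are turned into runtime flags here, and the helper name is illustrative, not kernel API.

#include <stdio.h>

#define EXSTACK_DEFAULT	0	/* binary carries no PT_GNU_STACK header */
#define CPU_ARCH_ARMv6	8	/* matches the kernel's cpu_architecture() enumeration */

/* hypothetical helper: 'aeabi' and 'oabi_compat' stand in for the
 * compile-time CONFIG_AEABI / CONFIG_OABI_COMPAT tests */
static int read_implies_exec(int executable_stack, int cpu_arch,
			     int aeabi, int oabi_compat)
{
	if (executable_stack == EXSTACK_DEFAULT)
		return 1;	/* old toolchain: assume executable stack */
	if (cpu_arch < CPU_ARCH_ARMv6)
		return 1;	/* no hardware XN before ARMv6 */
	if (!aeabi || oabi_compat)
		return 1;	/* OABI restart code runs on the stack */
	return 0;
}

int main(void)
{
	/* EABI-only ARMv7 binary with PT_GNU_STACK: NX can be honoured */
	printf("%d\n", read_implies_exec(1, 9, 1, 0));	/* prints 0 */
	return 0;
}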
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75..096f600dc8d 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@ void __init init_IRQ(void)

 #ifdef CONFIG_SMP
 	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
+	bad_irq_desc.node = smp_processor_id();
 #endif
 	init_arch_irq();
 }
@@ -176,7 +176,7 @@ void __init init_IRQ(void)
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);

 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;

-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);

 			if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c3265a2e7cd..39196dff478 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }

+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread.  We try to conserve power, while trying to keep
- * overall latency low.  The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -352,6 +358,23 @@ asm(	".section .text\n"
 "	.size	kernel_thread_helper, . - kernel_thread_helper\n"
 "	.previous");

+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm(	".section .text\n"
+"	.align\n"
+"	.type	kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+"	.fnstart\n"
+"	.cantunwind\n"
+"	bl	do_exit\n"
+"	nop\n"
+"	.fnend\n"
+"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
+"	.previous");
+#else
+#define kernel_thread_exit	do_exit
+#endif
+
 /*
  * Create a kernel thread.
  */
@@ -363,9 +386,9 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ARM_r1 = (unsigned long)arg;
 	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)do_exit;
+	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;

 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
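The process.c rework above inverts the old idle design: pm_idle now defaults to default_idle and is called unconditionally inside the need_resched() loop, with hlt_counter checked on every iteration and IRQs disabled around the call. A stand-alone sketch of the resulting control flow, with all kernel primitives replaced by illustrative stubs:

#include <stdio.h>

/* stand-ins for kernel state and primitives (names are illustrative) */
static int resched_pending;
static int hlt_counter;

static int need_resched(void) { return resched_pending; }
static void local_irq_disable(void) { }
static void local_irq_enable(void) { }
static void cpu_relax(void) { }
static void arch_idle(void) { puts("wfi"); resched_pending = 1; }

/* default_idle() as in the patch: enter low power, re-enable IRQs */
static void default_idle(void)
{
	if (!need_resched())
		arch_idle();
	local_irq_enable();
}

static void (*pm_idle)(void) = default_idle;

int main(void)
{
	/* one pass of the inner loop from the reworked cpu_idle() */
	while (!need_resched()) {
		local_irq_disable();
		if (hlt_counter) {
			/* low power idle disabled: spin with IRQs back on */
			local_irq_enable();
			cpu_relax();
		} else {
			pm_idle();	/* may enable IRQs itself */
			local_irq_enable();
		}
	}
	return 0;
}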
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 80b8b5c7e07..93bb4247b7e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -426,9 +426,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 		 */
 		thumb = handler & 1;

-		if (thumb)
+		if (thumb) {
 			cpsr |= PSR_T_BIT;
-		else
+#if __LINUX_ARM_ARCH__ >= 7
+			/* clear the If-Then Thumb-2 execution state */
+			cpsr &= ~PSR_IT_MASK;
+#endif
+		} else
 			cpsr &= ~PSR_T_BIT;
 	}
 #endif
@@ -532,7 +536,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	return err;
 }

-static inline void restart_syscall(struct pt_regs *regs)
+static inline void setup_syscall_restart(struct pt_regs *regs)
 {
 	regs->ARM_r0 = regs->ARM_ORIG_r0;
 	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
@@ -567,7 +571,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 			}
 			/* fallthrough */
 		case -ERESTARTNOINTR:
-			restart_syscall(regs);
+			setup_syscall_restart(regs);
 		}
 	}
@@ -691,7 +695,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
 		    regs->ARM_r0 == -ERESTARTSYS ||
 		    regs->ARM_r0 == -ERESTARTNOINTR) {
-			restart_syscall(regs);
+			setup_syscall_restart(regs);
 		}
 	}
 	single_step_set(current);
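In the signal.c hunk above, bit 0 of the handler address selects Thumb mode, and on ARMv7 any leftover If-Then (IT) execution state must also be cleared so the handler does not start mid-way through a stale IT block. The CPSR fixup reduces to this sketch; the mask values mirror the kernel's PSR_T_BIT/PSR_IT_MASK definitions, and the helper name is illustrative:

#include <stdio.h>

#define PSR_T_BIT	0x00000020
#define PSR_IT_MASK	0x0600fc00

static unsigned int fixup_cpsr(unsigned int cpsr, unsigned long handler)
{
	if (handler & 1) {
		cpsr |= PSR_T_BIT;	/* Thumb handler */
		cpsr &= ~PSR_IT_MASK;	/* drop stale IT state (ARMv7) */
	} else {
		cpsr &= ~PSR_T_BIT;	/* ARM handler */
	}
	return cpsr;
}

int main(void)
{
	printf("0x%08x\n", fixup_cpsr(0x0600fc10, 0x8001));	/* -> 0x00000030 */
	printf("0x%08x\n", fixup_cpsr(0x00000030, 0x8000));	/* -> 0x00000010 */
	return 0;
}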
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6014dfd22af..de885fd256c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,16 +22,20 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>

 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/cputype.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/localtimer.h>

 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -163,7 +167,7 @@ int __cpuexit __cpu_disable(void)
 	 * Take this CPU offline.  Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);

 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -274,9 +278,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_fiq_enable();

 	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
 	 */
-	local_timer_setup();
+	percpu_timer_setup();

 	calibrate_delay();
@@ -285,7 +289,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, now it's safe to let the boot CPU continue
 	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);

 	/*
 	 * OK, it's off to the idle thread for us
@@ -383,10 +387,16 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }

+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
+
 	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
 	irq_exit();
 }
@@ -405,6 +415,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif

+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_TIMER);
+}
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void local_timer_setup(struct clock_event_device *evt)
+{
+	evt->name	= "dummy_timer";
+	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_PERIODIC |
+			  CLOCK_EVT_FEAT_DUMMY;
+	evt->rating	= 400;
+	evt->mult	= 1;
+	evt->set_mode	= broadcast_timer_set_mode;
+	evt->broadcast	= smp_timer_broadcast;
+
+	clockevents_register_device(evt);
+}
+#endif
+
+void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+
+	local_timer_setup(evt);
+}
+
 static DEFINE_SPINLOCK(stop_lock);

 /*
@@ -417,7 +463,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 	dump_stack();
 	spin_unlock(&stop_lock);

-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);

 	local_fiq_disable();
 	local_irq_disable();
@@ -501,11 +547,6 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }

-void smp_timer_broadcast(const struct cpumask *mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
-}
-
 void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
@@ -545,6 +586,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };

+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -587,51 +634,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)

 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }

 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }

 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }

 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }

 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }

 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
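tlb_ops_need_broadcast() above reads ID_MMFR3 and tests the maintenance-broadcast field in bits [15:12]: a value of 2 or more means the hardware broadcasts TLB operations to the other cores itself, so no IPI round-trip is needed. A user-space sketch of the same decode, with made-up register values:

#include <stdio.h>

/* same bit extraction as the kernel's tlb_ops_need_broadcast() */
static int tlb_ops_need_broadcast(unsigned int id_mmfr3)
{
	return ((id_mmfr3 >> 12) & 0xf) < 2;
}

int main(void)
{
	printf("%d\n", tlb_ops_need_broadcast(0x00002000));	/* field = 2 -> 0, HW broadcast */
	printf("%d\n", tlb_ops_need_broadcast(0x00001000));	/* field = 1 -> 1, IPI needed */
	return 0;
}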
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
new file mode 100644
index 00000000000..d3831f616ee
--- /dev/null
+++ b/arch/arm/kernel/smp_scu.c
@@ -0,0 +1,48 @@
+/*
+ *  linux/arch/arm/kernel/smp_scu.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/smp_scu.h>
+#include <asm/cacheflush.h>
+
+#define SCU_CTRL		0x00
+#define SCU_CONFIG		0x04
+#define SCU_CPU_STATUS		0x08
+#define SCU_INVALIDATE		0x0c
+#define SCU_FPGA_REVISION	0x10
+
+/*
+ * Get the number of CPU cores from the SCU configuration
+ */
+unsigned int __init scu_get_core_count(void __iomem *scu_base)
+{
+	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	return (ncores & 0x03) + 1;
+}
+
+/*
+ * Enable the SCU
+ */
+void __init scu_enable(void __iomem *scu_base)
+{
+	u32 scu_ctrl;
+
+	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl |= 1;
+	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+
+	/*
+	 * Ensure that the data accessed by CPU0 before the SCU was
+	 * initialised is visible to the other CPUs.
+	 */
+	flush_cache_all();
+}
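scu_get_core_count() above derives the core count from the low two bits of the SCU configuration register, which encode the number of CPUs minus one. A worked example of the same decode, with illustrative register values:

#include <stdio.h>

static unsigned int scu_core_count(unsigned int scu_config)
{
	return (scu_config & 0x03) + 1;	/* field holds ncores - 1 */
}

int main(void)
{
	printf("%u\n", scu_core_count(0x00000001));	/* -> 2 cores */
	printf("%u\n", scu_core_count(0x00000013));	/* -> 4 cores */
	return 0;
}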
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
new file mode 100644
index 00000000000..d8c88c633c6
--- /dev/null
+++ b/arch/arm/kernel/smp_twd.c
@@ -0,0 +1,175 @@
+/*
+ *  linux/arch/arm/kernel/smp_twd.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/smp_twd.h>
+#include <asm/hardware/gic.h>
+
+#define TWD_TIMER_LOAD			0x00
+#define TWD_TIMER_COUNTER		0x04
+#define TWD_TIMER_CONTROL		0x08
+#define TWD_TIMER_INTSTAT		0x0C
+
+#define TWD_WDOG_LOAD			0x20
+#define TWD_WDOG_COUNTER		0x24
+#define TWD_WDOG_CONTROL		0x28
+#define TWD_WDOG_INTSTAT		0x2C
+#define TWD_WDOG_RESETSTAT		0x30
+#define TWD_WDOG_DISABLE		0x34
+
+#define TWD_TIMER_CONTROL_ENABLE	(1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT	(0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC	(1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE	(1 << 2)
+
+/* set up by the platform code */
+void __iomem *twd_base;
+
+static unsigned long twd_timer_rate;
+
+static void twd_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* timer load already set up */
+		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+			| TWD_TIMER_CONTROL_PERIODIC;
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = 0;
+	}
+
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_set_next_event(unsigned long evt,
+			struct clock_event_device *unused)
+{
+	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+	ctrl |= TWD_TIMER_CONTROL_ENABLE;
+
+	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+
+	return 0;
+}
+
+/*
+ * local_timer_ack: checks for a local timer interrupt.
+ *
+ * If a local timer interrupt has occurred, acknowledge and return 1.
+ * Otherwise, return 0.
+ */
+int twd_timer_ack(void)
+{
+	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
+		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void __cpuinit twd_calibrate_rate(void)
+{
+	unsigned long load, count;
+	u64 waitjiffies;
+
+	/*
+	 * If this is the first time round, we need to work out how fast
+	 * the timer ticks
+	 */
+	if (twd_timer_rate == 0) {
+		printk(KERN_INFO "Calibrating local timer... ");
+
+		/* Wait for a tick to start */
+		waitjiffies = get_jiffies_64() + 1;
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		/* OK, now the tick has started, let's get the timer going */
+		waitjiffies += 5;
+
+		/* enable, no interrupt or reload */
+		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+
+		/* maximum value */
+		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+
+		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+
+		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+			(twd_timer_rate / 100000) % 100);
+	}
+
+	load = twd_timer_rate / HZ;
+
+	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
+}
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+	unsigned long flags;
+
+	twd_calibrate_rate();
+
+	clk->name = "local_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->rating = 350;
+	clk->set_mode = twd_set_mode;
+	clk->set_next_event = twd_set_next_event;
+	clk->shift = 20;
+	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+
+	/* Make sure our local interrupt controller has this enabled */
+	local_irq_save(flags);
+	get_irq_chip(clk->irq)->unmask(clk->irq);
+	local_irq_restore(flags);
+
+	clockevents_register_device(clk);
+}
+
+/*
+ * take a local timer down
+ */
+void __cpuexit twd_timer_stop(void)
+{
+	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+}
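The calibration in twd_calibrate_rate() above lets the 32-bit counter run down from 0xFFFFFFFF for five jiffies, then scales the consumed ticks by HZ/5 to get ticks per second. A worked example of that arithmetic; the count value is illustrative:

#include <stdio.h>

#define HZ 100	/* 1 jiffy = 10ms at this tick rate */

int main(void)
{
	/* suppose 5,000,000 ticks were consumed over 5 jiffies (50ms) */
	unsigned long count = 0xFFFFFFFFUL - 5000000UL;
	unsigned long rate  = (0xFFFFFFFFUL - count) * (HZ / 5);

	/* 5M ticks / 50ms = 100MHz, printed as in the kernel message */
	printf("%lu.%02luMHz\n", rate / 1000000, (rate / 100000) % 100);
	return 0;
}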
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c7ff4..dd56e11f339 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 			ctrl->vrs[14] = *vsp++;
 		ctrl->vrs[SP] = (unsigned long)vsp;
 	} else if (insn == 0xb0) {
-		ctrl->vrs[PC] = ctrl->vrs[LR];
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
 		/* no further processing */
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
 	}

 	while (ctrl.entries > 0) {
-		int urc;
-
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
-			return -URC_FAILURE;
-		urc = unwind_exec_insn(&ctrl);
+		int urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
 	}

 	if (ctrl.vrs[PC] == 0)
 		ctrl.vrs[PC] = ctrl.vrs[LR];

+	/* check for infinite loop */
+	if (frame->pc == ctrl.vrs[PC])
+		return -URC_FAILURE;
+
 	frame->fp = ctrl.vrs[FP];
 	frame->sp = ctrl.vrs[SP];
 	frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
 void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long high, low;
 	register unsigned long current_sp asm ("sp");

 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}

-	low = frame.sp & ~(THREAD_SIZE - 1);
-	high = low + THREAD_SIZE;
-
 	while (1) {
 		int urc;
 		unsigned long where = frame.pc;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index c90f27250ea..4340bf3d2c8 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@ SECTIONS
 		*(.exitcall.exit)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+		*(.ARM.exidx.cpuexit.text)
+		*(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
 #ifndef CONFIG_MMU
 		*(.fixup)
 		*(__ex_table)
@@ -141,6 +149,7 @@ SECTIONS

 	.data : AT(__data_loc) {
 		_data = .;		/* address in memory */
+		_sdata = .;

 		/*
 		 * first, the init task union, aligned
@@ -192,6 +201,7 @@ SECTIONS
 		__bss_start = .;	/* BSS				*/
 		*(.bss)
 		*(COMMON)
+		__bss_stop = .;
 		_end = .;
 	}
 					/* Stabs debugging sections.	*/
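The unwind.c "check for infinite loop" test above fails a frame whose unwound PC equals the PC it started from; previously a degenerate unwind entry (for example one that simply copies LR to PC when LR already equals PC) could make unwind_backtrace() spin forever. A minimal sketch of that guard, using a single-field frame and no real unwind tables:

#include <stdio.h>

struct frame { unsigned long pc; };

/* degenerate unwind step: leaves the PC unchanged */
static unsigned long unwound_pc(unsigned long pc)
{
	return pc;
}

static int unwind_frame(struct frame *f)
{
	unsigned long new_pc = unwound_pc(f->pc);

	if (f->pc == new_pc)	/* check for infinite loop */
		return -1;	/* fail the frame, backtrace stops */
	f->pc = new_pc;
	return 0;
}

int main(void)
{
	struct frame f = { 0x8000 };
	printf("%d\n", unwind_frame(&f));	/* -> -1 */
	return 0;
}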