 arch/arm/include/asm/arch_timer.h | 94 ++++++++++++++++++++++++++++++++++++++
 arch/arm/kernel/arch_timer.c      | 92 --------------------------------------
 2 files changed, 94 insertions, 92 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index d40229d9a1c..db0fdc4cc9c 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,13 +1,107 @@
 #ifndef __ASMARM_ARCH_TIMER_H
 #define __ASMARM_ARCH_TIMER_H
 
+#include <asm/barrier.h>
 #include <asm/errno.h>
 #include <linux/clocksource.h>
+#include <linux/types.h>
 
 #ifdef CONFIG_ARM_ARCH_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
 struct timecounter *arch_timer_get_timecounter(void);
+
+#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
+#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
+#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)
+
+#define ARCH_TIMER_REG_CTRL		0
+#define ARCH_TIMER_REG_TVAL		1
+
+#define ARCH_TIMER_PHYS_ACCESS		0
+#define ARCH_TIMER_VIRT_ACCESS		1
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+{
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+			break;
+		}
+	}
+}
+
+static inline u32 arch_timer_reg_read(const int access, const int reg)
+{
+	u32 val = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+			break;
+		}
+	}
+
+	return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+	u32 val;
+	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+	return val;
+}
+
+static inline u64 arch_counter_get_cntpct(void)
+{
+	u64 cval;
+
+	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+	return cval;
+}
+
+static inline u64 arch_counter_get_cntvct(void)
+{
+	u64 cval;
+
+	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+	return cval;
+}
 #else
 static inline int arch_timer_of_register(void)
 {
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index f31c9ee18af..e973cc0eaad 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -46,98 +46,6 @@ static bool arch_timer_use_virtual = true;
  * Architected system timer support.
  */
 
-#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
-#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
-#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)
-
-#define ARCH_TIMER_REG_CTRL		0
-#define ARCH_TIMER_REG_TVAL		1
-
-#define ARCH_TIMER_PHYS_ACCESS		0
-#define ARCH_TIMER_VIRT_ACCESS		1
-
-/*
- * These register accessors are marked inline so the compiler can
- * nicely work out which register we want, and chuck away the rest of
- * the code. At least it does so with a recent GCC (4.6.3).
- */
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
-{
-	if (access == ARCH_TIMER_PHYS_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
-			break;
-		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
-			break;
-		}
-	}
-
-	isb();
-}
-
-static inline u32 arch_timer_reg_read(const int access, const int reg)
-{
-	u32 val = 0;
-
-	if (access == ARCH_TIMER_PHYS_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-			break;
-		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
-			break;
-		}
-	}
-
-	return val;
-}
-
-static inline u32 arch_timer_get_cntfrq(void)
-{
-	u32 val;
-	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
-	return val;
-}
-
-static inline u64 arch_counter_get_cntpct(void)
-{
-	u64 cval;
-	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
-	return cval;
-}
-
-static inline u64 arch_counter_get_cntvct(void)
-{
-	u64 cval;
-	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
-	return cval;
-}
-
 static irqreturn_t inline timer_handler(const int access,
 					struct clock_event_device *evt)
 {
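For illustration, the accessors and constants this patch moves into <asm/arch_timer.h> are used by callers roughly as sketched below. This is a sketch, not code from the patch: the function name example_set_next_event() and the choice of the physical timer are assumptions; only arch_timer_reg_read()/arch_timer_reg_write() and the ARCH_TIMER_* definitions come from the header above.

/*
 * Illustrative sketch (not part of this patch): programming a one-shot
 * timer event with the accessors now exposed by <asm/arch_timer.h>.
 * The function name and the use of the physical timer are assumptions.
 */
#include <asm/arch_timer.h>

static void example_set_next_event(unsigned long evt)
{
	u32 ctrl;

	/* Read CNTP_CTL, the control register of the physical timer. */
	ctrl = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL);

	/* Enable the timer and unmask its interrupt. */
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	/* Program the downcounter (CNTP_TVAL), then write the control bits back. */
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
}

Passing ARCH_TIMER_VIRT_ACCESS instead selects the virtual timer registers (CNTV_TVAL/CNTV_CTL); because the accessors are inline and the access argument is a compile-time constant at each call site, the compiler can discard the unused branch, which is the point of the comment kept in the header.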