From 0ae7653128c80a4f2920cbe9b124792c2fd9d9e0 Mon Sep 17 00:00:00 2001
From: David Feng
Date: Sat, 14 Dec 2013 11:47:35 +0800
Subject: arm64: core support

Relocation code based on a patch by Scott Wood, which is:
Signed-off-by: Scott Wood

Signed-off-by: David Feng
---
 arch/arm/include/asm/proc-armv/system.h | 59 ++++++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

(limited to 'arch/arm/include/asm/proc-armv/system.h')

diff --git a/arch/arm/include/asm/proc-armv/system.h b/arch/arm/include/asm/proc-armv/system.h
index cda8976b6..693d1f492 100644
--- a/arch/arm/include/asm/proc-armv/system.h
+++ b/arch/arm/include/asm/proc-armv/system.h
@@ -13,6 +13,60 @@
 /*
  * Save the current interrupt enable state & disable IRQs
  */
+#ifdef CONFIG_ARM64
+
+/*
+ * Save the current interrupt enable state
+ * and disable IRQs/FIQs
+ */
+#define local_irq_save(flags)				\
+	({						\
+	asm volatile(					\
+	"mrs	%0, daif\n"				\
+	"msr	daifset, #3"				\
+	: "=r" (flags)					\
+	:						\
+	: "memory");					\
+	})
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define local_irq_restore(flags)			\
+	({						\
+	asm volatile(					\
+	"msr	daif, %0"				\
+	:						\
+	: "r" (flags)					\
+	: "memory");					\
+	})
+
+/*
+ * Enable IRQs/FIQs
+ */
+#define local_irq_enable()				\
+	({						\
+	asm volatile(					\
+	"msr	daifclr, #3"				\
+	:						\
+	:						\
+	: "memory");					\
+	})
+
+/*
+ * Disable IRQs/FIQs
+ */
+#define local_irq_disable()				\
+	({						\
+	asm volatile(					\
+	"msr	daifset, #3"				\
+	:						\
+	:						\
+	: "memory");					\
+	})
+
+#else	/* CONFIG_ARM64 */
+
 #define local_irq_save(x)				\
 	({						\
 		unsigned long temp;			\
@@ -107,7 +161,10 @@
 	: "r" (x)					\
 	: "memory")
 
-#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+#endif	/* CONFIG_ARM64 */
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
+	defined(CONFIG_ARM64)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
  * cache totally.  This means that the cache becomes inconsistent, and,
-- 
cgit v1.2.3-70-g09d2
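
Usage note (not part of the commit above): a minimal sketch of how the new AArch64
macros pair up. local_irq_save() captures the current PSTATE.DAIF value into flags
and then masks IRQs and FIQs with "msr daifset, #3"; local_irq_restore() writes the
saved value back with "msr daif, %0". The function and variable names below are
hypothetical, and including the patched header directly is only for illustration;
in U-Boot these macros are normally reached through the arch system headers.

    /*
     * Illustration only: bump_counter() and shared_counter do not appear
     * in the patch.  Assumes the macros defined above are in scope.
     */
    #include <asm/proc-armv/system.h>

    static unsigned long shared_counter;

    void bump_counter(void)
    {
    	unsigned long flags;

    	local_irq_save(flags);		/* save DAIF, then mask IRQ + FIQ */
    	shared_counter++;		/* runs with interrupts masked */
    	local_irq_restore(flags);	/* restore the saved DAIF state */
    }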