Diffstat (limited to 'arch/mips/include/asm/netlogic/mips-extns.h')
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h | 99
1 file changed, 85 insertions, 14 deletions
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 32ba6d95d47..f299d31d7c1 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -38,21 +38,16 @@
 /*
  * XLR and XLP interrupt request and interrupt mask registers
  */
-#define read_c0_eirr()          __read_64bit_c0_register($9, 6)
-#define read_c0_eimr()          __read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val)      __write_64bit_c0_register($9, 6, val)
-
 /*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the flags contain the part of EIMR that is shadowed
+ * in the STATUS register; restoring flags will overwrite EIMR's lower 8 bits.
+ *
+ * Call with interrupts disabled.
  */
 #define write_c0_eimr(val)                                              \
 do {                                                                    \
-       if (sizeof(unsigned long) == 4)        {                        \
-               unsigned long __flags;                                  \
-                                                                       \
-               local_irq_save(__flags);                                \
+       if (sizeof(unsigned long) == 4) {                               \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
                        "dsll\t%L0, %L0, 32\n\t"                        \
@@ -62,12 +57,88 @@ do {
                        "dmtc0\t%L0, $9, 7\n\t"                         \
                        ".set\tmips0"                                   \
                        : : "r" (val));                                 \
-               __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
-               local_irq_restore(__flags);                             \
        } else                                                          \
                __write_64bit_c0_register($9, 7, (val));                \
 } while (0)
 
+/*
+ * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with
+ * standard functions will be very inefficient. This provides
+ * optimized functions for the normal operations on the registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  $1, $1, %0\n\t"
+               "dmtc0  $1, $9, 6\n\t"
+               ".set   pop"
+               : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  %0, $1, %0\n\t"
+               "dmfc0  $1, $9, 7\n\t"
+               "or     $1, %0\n\t"
+               "dmtc0  $1, $9, 7\n\t"
+               ".set   pop"
+               : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  %0, $1, %0\n\t"
+               "dmfc0  $1, $9, 7\n\t"
+               "or     $1, %0\n\t"
+               "xor    $1, %0\n\t"
+               "dmtc0  $1, $9, 7\n\t"
+               ".set   pop"
+               : "+r" (irq));
+}
+
+/*
+ * Read c0 eimr and c0 eirr, do AND of the two values, the result is
+ * the interrupts which are raised and are not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+       uint64_t val;
+
+#ifdef CONFIG_64BIT
+       val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
+#else
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "dmfc0  %M0, $9, 6\n\t"
+               "dmfc0  %L0, $9, 7\n\t"
+               "and    %M0, %L0\n\t"
+               "dsll   %L0, %M0, 32\n\t"
+               "dsra   %M0, %M0, 32\n\t"
+               "dsra   %L0, %L0, 32\n\t"
+               ".set   pop"
+               : "=r" (val));
+#endif
+       return val;
+}
+
 static inline int hard_smp_processor_id(void)
 {
        return __read_32bit_c0_register($15, 1) & 0x3ff;
@@ -208,7 +279,7 @@ do {
                        ".set\tmips0\n\t"                               \
                        : : "Jr" (value));                              \
        else                                                            \
-               __asm__ __volatile__(                                   \
+               __asm__ __volatile__(                                   \
                        ".set\tmips32\n\t"                              \
                        "mtc2\t%z0, " #reg ", " #sel "\n\t"             \
                        ".set\tmips0\n\t"                               \
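
Usage note: the new helpers are meant to be called from contexts where interrupts
are already off. A minimal sketch (hypothetical names, assuming <linux/irq.h>;
not part of this patch) of set_c0_eimr()/clear_c0_eimr() as irq_chip mask/unmask
methods. The genirq core invokes these with the descriptor lock held and
interrupts disabled, which satisfies the "call with interrupts disabled"
contract without a local_irq_save()/local_irq_restore() pair that, per the
NOTE in the patch, would clobber EIMR's shadowed low 8 bits on restore:

#include <linux/irq.h>

/* Hypothetical irq_chip methods; genirq calls these with irqs off. */
static void example_cpuintr_mask(struct irq_data *d)
{
        clear_c0_eimr(d->irq);  /* clear the EIMR bit: source masked */
}

static void example_cpuintr_unmask(struct irq_data *d)
{
        set_c0_eimr(d->irq);    /* set the EIMR bit: source unmasked */
}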
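Similarly, a sketch of how a plat_irq_dispatch()-style handler might use
read_c0_eirr_and_eimr() and ack_c0_eirr(); exception context already runs with
interrupts disabled, and __ilog2_u64()/do_IRQ() are standard kernel facilities.
The wiring here is illustrative only, not the dispatch code that accompanies
this patch:

#include <linux/irq.h>
#include <linux/log2.h>

asmlinkage void example_irq_dispatch(void)
{
        uint64_t eirr;
        int irq;

        /* One read yields the pending-and-unmasked sources (EIRR & EIMR). */
        eirr = read_c0_eirr_and_eimr();
        if (eirr == 0)
                return;

        irq = __ilog2_u64(eirr);        /* highest pending source */
        ack_c0_eirr(irq);               /* clear its EIRR bit before handling */
        do_IRQ(irq);
}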