Diffstat (limited to 'arch/mips/include/asm/irqflags.h')
-rw-r--r--	arch/mips/include/asm/irqflags.h	207
1 file changed, 75 insertions, 132 deletions
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909..9f3384c789d 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1e						\n"
-	"	mtc0	$1,$12						\n"
-#endif
-	"	irq_enable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
+
 
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f					\n"
-	"	 di							\n"
+	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
-
 
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	"	.macro	arch_local_irq_enable				\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei							\n"
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1e						\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_enable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	"	.macro	arch_local_save_flags flags			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1					\n"
+#else
+	"	mfc0	\\flags, $12					\n"
+#endif
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.