Diffstat (limited to 'arch/mips/lib')
-rw-r--r--   arch/mips/lib/Makefile        5
-rw-r--r--   arch/mips/lib/bitops.c      179
-rw-r--r--   arch/mips/lib/mips-atomic.c 176
3 files changed, 358 insertions, 2 deletions
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e841c7..eeddc58802e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 00000000000..239a9c957b0
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 00000000000..cd160be3ce4
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	ori	$1, 0x400					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1					\n"
+	"	ori	$1, \\result, 0x400				\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+	"	andi	\\result, \\result, 0x400			\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12					\n"
+	"	ori	$1, \\result, 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags			\n"
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1						\n"
+	"andi	\\flags, 0x400						\n"
+	"ori	$1, 0x400						\n"
+	"xori	$1, 0x400						\n"
+	"or	\\flags, $1						\n"
+	"mtc0	\\flags, $2, 1						\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	\\flags, 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	\\flags, $1					\n"
+	"	mtc0	\\flags, $12					\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
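All six fallbacks added in bitops.c follow the same pattern: index to the word that holds the requested bit (nr >> SZLONG_LOG), build a mask from the bit position within that word (nr & SZLONG_MASK), and perform the read-modify-write with local interrupts masked. The sketch below is illustrative user-space C, not kernel code: WORD_INDEX, BIT_MASK_OF and sketch_test_and_set_bit are made-up names, and the raw_local_irq_save()/raw_local_irq_restore() bracket is reduced to a comment because it has no user-space equivalent. It only demonstrates the index/mask arithmetic and the test-and-set return semantics.

```c
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define WORD_INDEX(nr)	((nr) / BITS_PER_LONG)		/* plays the role of nr >> SZLONG_LOG */
#define BIT_MASK_OF(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* plays the role of 1UL << (nr & SZLONG_MASK) */

/* Same shape as __mips_test_and_set_bit(): set the bit, report whether it was already set. */
static int sketch_test_and_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long *a = addr + WORD_INDEX(nr);
	unsigned long mask = BIT_MASK_OF(nr);
	unsigned long old;

	/* the kernel helper wraps this read-modify-write in
	 * raw_local_irq_save(flags) / raw_local_irq_restore(flags) */
	old = *a & mask;
	*a |= mask;

	return old != 0;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	unsigned long nr = BITS_PER_LONG + 6;	/* bit 6 of the second word */

	printf("first  test_and_set: %d\n", sketch_test_and_set_bit(nr, map));	/* 0: bit was clear */
	printf("second test_and_set: %d\n", sketch_test_and_set_bit(nr, map));	/* 1: bit already set */
	printf("map[1] = %#lx\n", map[1]);					/* 0x40 */
	return 0;
}
```

Masking interrupts (rather than taking a lock) is presumably sufficient here because these out-of-line helpers are only reached on CPUs that lack a faster ll/sc-based path, which in practice means uniprocessor systems; the only concurrent access to guard against is an interrupt handler on the same CPU.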