Diffstat (limited to 'arch/mips')
 arch/mips/cavium-octeon/executive/cvmx-l2c.c |   1
 arch/mips/fw/arc/misc.c                      |   1
 arch/mips/include/asm/Kbuild                 |   1
 arch/mips/include/asm/bitops.h               | 128
 arch/mips/include/asm/compat.h               |   2
 arch/mips/include/asm/delay.h                |   6
 arch/mips/include/asm/io.h                   |   1
 arch/mips/include/asm/irqflags.h             | 207
 arch/mips/include/asm/pgtable-64.h           |  15
 arch/mips/include/asm/thread_info.h          |   6
 arch/mips/jz4740/serial.h                    |   3
 arch/mips/kernel/setup.c                     |  26
 arch/mips/kernel/smp-cmp.c                   |   2
 arch/mips/lib/Makefile                       |   5
 arch/mips/lib/bitops.c                       | 179
 arch/mips/lib/delay.c                        |   6
 arch/mips/lib/dump_tlb.c                     |   4
 arch/mips/lib/mips-atomic.c                  | 176
 arch/mips/mm/tlb-r4k.c                       |   1
 arch/mips/mm/tlbex.c                         |  56
 arch/mips/mti-malta/malta-platform.c         |   3
21 files changed, 567 insertions(+), 262 deletions(-)
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index d38246e33dd..9f883bf7695 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index 7cf80ca2c1d..f9f5307434c 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -11,6 +11,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #include <asm/bcache.h>
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index e69de29bb2d..533053d12ce 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -0,0 +1 @@
+# MIPS headers
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b4..46ac73abd5e 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+			      volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+			       volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd..3c5d1464b7b 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h
index e7cd78277c2..dc0a5f77a35 100644
--- a/arch/mips/include/asm/delay.h
+++ b/arch/mips/include/asm/delay.h
@@ -13,9 +13,9 @@
 
 #include <linux/param.h>
 
-extern void __delay(unsigned int loops);
-extern void __ndelay(unsigned int ns);
-extern void __udelay(unsigned int us);
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
 
 #define ndelay(ns) __ndelay(ns)
 #define udelay(us) __udelay(us)
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c..ff2e0345e01 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909..9f3384c789d 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1e						\n"
-	"	mtc0	$1,$12						\n"
-#endif
-	"	irq_enable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
 	"	.endm							\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f					\n"
-	"	 di							\n"
+	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	"	.macro	arch_local_irq_enable				\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei							\n"
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1e						\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_enable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	"	.macro	arch_local_save_flags flags			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1					\n"
+#else
+	"	mfc0	\\flags, $12					\n"
+#endif
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
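Note on the irqflags.h change above: on everything except MIPSR2-without-SMTC, arch_local_irq_disable/save/restore become out-of-line calls (implemented in arch/mips/lib/mips-atomic.c further down), while the caller-visible contract is unchanged. A minimal sketch of the save/restore pattern that the bitops fallbacks in arch/mips/lib/bitops.c build on — atomic_or_word() is a hypothetical helper written for illustration, not part of this patch:

    /*
     * Hypothetical example: bracket a read-modify-write with
     * raw_local_irq_save()/raw_local_irq_restore(), exactly as the
     * __mips_*_bit() fallbacks below do.
     */
    static void atomic_or_word(volatile unsigned long *p, unsigned long mask)
    {
    	unsigned long flags;

    	raw_local_irq_save(flags);	/* IRQs off; old state kept in flags */
    	*p |= mask;			/* non-atomic RMW; safe because IRQs are
    					   off and pre-LL/SC CPUs are UP */
    	raw_local_irq_restore(flags);	/* put the saved IRQ state back */
    }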
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c26e1825007..f5b521d5a67 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -9,6 +9,7 @@
 #ifndef _ASM_PGTABLE_64_H
 #define _ASM_PGTABLE_64_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
 
 #include <asm/addrspace.h>
@@ -172,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e9175..18806a52061 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
diff --git a/arch/mips/jz4740/serial.h b/arch/mips/jz4740/serial.h
index b9fe3ade028..8eb715bb1ea 100644
--- a/arch/mips/jz4740/serial.h
+++ b/arch/mips/jz4740/serial.h
@@ -14,6 +14,9 @@
  */
 
 #ifndef __MIPS_JZ4740_SERIAL_H__
+#define __MIPS_JZ4740_SERIAL_H__
+
+struct uart_port;
 
 void jz4740_serial_out(struct uart_port *p, int offset, int value);
 
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a53f8ec37aa..290dc6a1d7a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", };
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
 	int x = boot_mem_map.nr_map;
-	struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+	int i;
 
 	/* Sanity check */
 	if (start + size < start) {
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
 	}
 
 	/*
-	 * Try to merge with previous entry if any.  This is far less than
-	 * perfect but is sufficient for most real world cases.
+	 * Try to merge with existing entry, if any.
 	 */
-	if (x && prev->addr + prev->size == start && prev->type == type) {
-		prev->size += size;
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+		unsigned long top;
+
+		if (entry->type != type)
+			continue;
+
+		if (start + size < entry->addr)
+			continue;			/* no overlap */
+
+		if (entry->addr + entry->size < start)
+			continue;			/* no overlap */
+
+		top = max(entry->addr + entry->size, start + size);
+		entry->addr = min(entry->addr, start);
+		entry->size = top - entry->addr;
+
 		return;
 	}
 
-	if (x == BOOT_MEM_MAP_MAX) {
+	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
 		pr_err("Ooops! Too many entries in the memory map!\n");
 		return;
 	}
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index afc379ca375..06cd0c610f4 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -97,7 +97,7 @@ static void cmp_init_secondary(void)
 
 	/* Enable per-cpu interrupts: platform specific */
 
-	c->core = (read_c0_ebase() >> 1) & 0xff;
+	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
 #endif
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index c4a82e841c7..eeddc58802e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 00000000000..239a9c957b0
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 5995969e8c4..dc81ca8dc0d 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -15,13 +15,17 @@
 #include <asm/compiler.h>
 #include <asm/war.h>
 
-inline void __delay(unsigned int loops)
+void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ (
 	"	.set	noreorder				\n"
 	"	.align	3					\n"
 	"1:	bnez	%0, 1b					\n"
+#if __SIZEOF_LONG__ == 4
 	"	subu	%0, 1					\n"
+#else
+	"	dsubu	%0, 1					\n"
+#endif
 	"	.set	reorder					\n"
 	: "=r" (loops)
 	: "0" (loops));
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 3f69725556a..a99c1d3fc56 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -50,8 +50,9 @@ static void dump_tlb(int first, int last)
 {
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1;
-	unsigned int s_index, pagemask, c0, c1, i;
+	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
 
+	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
 	asid = s_entryhi & 0xff;
@@ -103,6 +104,7 @@ static void dump_tlb(int first, int last)
 
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
+	write_c0_pagemask(s_pagemask);
 }
 
 void dump_tlb_all(void)
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 00000000000..cd160be3ce4
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	ori	$1, 0x400					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1					\n"
+	"	ori	$1, \\result, 0x400				\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+	"	andi	\\result, \\result, 0x400			\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12					\n"
+	"	ori	$1, \\result, 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags			\n"
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1						\n"
+	"andi	\\flags, 0x400						\n"
+	"ori	$1, 0x400						\n"
+	"xori	$1, 0x400						\n"
+	"or	\\flags, $1						\n"
+	"mtc0	\\flags, $2, 1						\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	\\flags, 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	\\flags, $1					\n"
+	"	mtc0	\\flags, $12					\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 87b9cfcc30f..4b9b935a070 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
+		tlbw_use_hazard();
 		write_c0_pagemask(PM_DEFAULT_MASK);
 	} else
 #endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 658a520364c..2833dcb67b5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
 	label_leave,
 	label_vmalloc,
 	label_vmalloc_done,
-	label_tlbw_hazard,
-	label_split,
+	label_tlbw_hazard_0,
+	label_split = label_tlbw_hazard_0 + 8,
 	label_tlbl_goaround1,
 	label_tlbl_goaround2,
 	label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently.  */
 UASM_L_LA(_split)
 UASM_L_LA(_tlbl_goaround1)
 UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+		return;
+	default:
+		BUG();
+	}
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * For debug purposes.
  */
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		 * This branch uses up a mtc0 hazard nop slot and saves
 		 * two nops after the tlbw instruction.
 		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+		uasm_bgezl_hazard(p, r, hazard_instance);
 		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
+		uasm_bgezl_label(l, p, hazard_instance);
+		hazard_instance++;
 		uasm_i_nop(p);
 		break;
 
 	case CPU_R4600:
 	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5000A:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
 
+	case CPU_R5000:
+	case CPU_R5000A:
+	case CPU_NEVADA:
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		tlbw(p);
+		break;
+
 	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_NEVADA:
-		uasm_i_nop(p); /* QED specifies 2 nops hazard */
-		/*
-		 * This branch uses up a mtc0 hazard nop slot and saves
-		 * a nop after the tlbw instruction.
-		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
-		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
-		break;
-
 	case CPU_RM7000:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 80562b81f0f..74732177851 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)						\
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
 	SMC_PORT(0x2F8, 3),
 	{
 		.mapbase	= 0x1f000900,	/* The CBUS UART */
-		.irq		= MIPS_CPU_IRQ_BASE + 2,
+		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
 		.uartclk	= 3686400,	/* Twice the usual clk! */
 		.iotype		= UPIO_MEM32,
 		.flags		= CBUS_UART_FLAGS,
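The add_memory_region() rework in arch/mips/kernel/setup.c above replaces append-and-extend merging with a scan that folds any same-type, touching-or-overlapping range into an existing entry. A self-contained sketch of that interval merge, using a hypothetical struct region in place of boot_mem_map (illustration only, not kernel code):

    #include <stdio.h>

    struct region { unsigned long addr, size; };

    /*
     * Fold [start, start + size) into *r when the two ranges touch or
     * overlap, mirroring the loop added to add_memory_region().
     * Returns 1 if merged, 0 if the ranges are disjoint.
     */
    static int merge_region(struct region *r, unsigned long start, unsigned long size)
    {
    	unsigned long top;

    	if (start + size < r->addr)
    		return 0;		/* ends below *r: no overlap */
    	if (r->addr + r->size < start)
    		return 0;		/* starts above *r: no overlap */

    	top = r->addr + r->size > start + size ? r->addr + r->size : start + size;
    	if (start < r->addr)
    		r->addr = start;
    	r->size = top - r->addr;
    	return 1;
    }

    int main(void)
    {
    	struct region r = { 0x1000, 0x2000 };	/* [0x1000, 0x3000) */

    	if (merge_region(&r, 0x2800, 0x1000))	/* [0x2800, 0x3800) overlaps the tail */
    		printf("addr=%#lx size=%#lx\n", r.addr, r.size);
    	/* prints addr=0x1000 size=0x2800, i.e. the union [0x1000, 0x3800) */
    	return 0;
    }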