Diffstat (limited to 'arch')
45 files changed, 354 insertions, 1303 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a5..e968a52e488 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
 		  is nothing connected to read from the DCC.
 
 	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
 		help
 		  Semihosting enables code running on an ARM target to use
 		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
 		  semihosting enabled for the special svc call to be trapped
 		  otherwise the kernel will crash.
 
-		  This is known to work with OpenOCD, as wellas
+		  This is known to work with OpenOCD, as well as
 		  ARM's Fast Models, or any other controlling environment
 		  that implements semihosting.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 210c923025b..74381a31ee4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -283,10 +283,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80baf..81769c1341f 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+		mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
+		bic     r6, r6, #1 << 31        @ 32-bit translation system
+		bic     r6, r6, #3 << 0         @ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index db2245353f0..0d6bb738c6d 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -145,6 +145,8 @@ CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_DMADEVICES=y
+CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0..5c8b3bf4d82 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
+	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
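A note for readers tracing the new `check_uaccess` macro: it computes the address of the last byte of the proposed access and compares it against the limit register (the uaccess.h callers added below pass `addr_limit - 1`), and because the test is carried through the carry flag, an `addr + size` that wraps past the top of the address space also branches to the fault label. A minimal C sketch of the same test, with a hypothetical helper name:

```c
/*
 * Hypothetical C equivalent of the check_uaccess assembler macro:
 * accept a user access only if every byte of [addr, addr + size)
 * lies at or below limit (callers pass addr_limit - 1).
 */
static inline int uaccess_range_ok(unsigned long addr, unsigned long size,
				   unsigned long limit)
{
	unsigned long last = addr + size - 1;

	if (last < addr)	/* addr + size wrapped past the top of memory */
		return 0;
	return last <= limit;	/* last byte still inside the user range */
}
```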
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f..5f6ddcc5645 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae..99a19512ee2 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b..77bd79f2ffd 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)							\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
-	       		break;						\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
+			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-	       		__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)							\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd9410..281bf330124 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
 		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		/* Aligned */
 		break;
 	case 1:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
 	case 2:
 		/* Allow halfword watchpoints and breakpoints. */
 		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
+	case 3:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
 	}
+
 out:
 	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 				goto unlock;
 
 			/* Check that the access type matches. */
-			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-				 HW_BREAKPOINT_R;
-			if (!(access & hw_breakpoint_type(wp)))
-				goto unlock;
+			if (debug_exception_updates_fsr()) {
+				access = (fsr & ARM_FSR_ACCESS_MASK) ?
+					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+				if (!(access & hw_breakpoint_type(wp)))
+					goto unlock;
+			}
 
 			/* We have a winner. */
 			info->trigger = addr;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c..b0179b89a04 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 			instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254..395d5fbb8fa 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
 	pr_info("Switching to timer-based delay loop\n");
 	lpj_fine			= freq / HZ;
+	loops_per_jiffy			= lpj_fine;
 	arm_delay_ops.delay		= __timer_delay;
 	arm_delay_ops.const_udelay	= __timer_const_udelay;
 	arm_delay_ops.udelay		= __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e3..9b06bb41fca 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
 * __get_user_X
 *
 * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
 * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
 *		lr corrupted
 *
 * No other registers must be altered.  (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
 * Note also that it is intended that __get_user_bad is not global.
 */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb)	r2, [r0]
-3: TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2: TUSER(ldrb)	r2, [r0], #1
-3: TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c58..3d73dcb959b 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
 * __put_user_X
 *
 * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
 *		r2, r3 contains the value
 * Outputs:	r0 is the error code
 *		lr corrupted
@@ -27,16 +28,19 @@
 * Note also that it is intended that __put_user_bad is not global.
 */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)	r2, [r0]
 6: TUSER(str)	r3, [r0, #4]
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9f..4431a62fff5 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
 	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
 	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10ba..65fb8bcd86c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index fcd4e85c4dd..346fd26f3aa 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
 	select OMAP_PACKAGE_CBB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
+	select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index eb203ec193d..7706fdfd025 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -235,7 +235,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)		+= board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)		+= board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK)		+= board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
index 7aa5ecaee5a..8e06de665b1 100644
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
 	CLK(NULL,	"mmu_fck",		&mmu_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex0_fck",	&smartreflex0_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt1_fck",		&timer1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt2_fck",		&timer2_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt3_fck",		&timer3_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt4_fck",		&timer4_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt5_fck",		&timer5_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt6_fck",		&timer6_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt7_fck",		&timer7_fck,	CK_AM33XX),
+	CLK(NULL,	"timer1_fck",		&timer1_fck,	CK_AM33XX),
+	CLK(NULL,	"timer2_fck",		&timer2_fck,	CK_AM33XX),
+	CLK(NULL,	"timer3_fck",		&timer3_fck,	CK_AM33XX),
+	CLK(NULL,	"timer4_fck",		&timer4_fck,	CK_AM33XX),
+	CLK(NULL,	"timer5_fck",		&timer5_fck,	CK_AM33XX),
+	CLK(NULL,	"timer6_fck",		&timer6_fck,	CK_AM33XX),
+	CLK(NULL,	"timer7_fck",		&timer7_fck,	CK_AM33XX),
 	CLK(NULL,	"usbotg_fck",		&usbotg_fck,	CK_AM33XX),
 	CLK(NULL,	"ieee5000_fck",		&ieee5000_fck,	CK_AM33XX),
 	CLK(NULL,	"wdt1_fck",		&wdt1_fck,	CK_AM33XX),
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa..f99e65cfb86 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
 		_clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
 	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
 	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
 	.clkdm_wakeup		= omap3_clkdm_wakeup,
 	.clkdm_allow_idle	= omap3_clkdm_allow_idle,
 	.clkdm_deny_idle	= omap3_clkdm_deny_idle,
-	.clkdm_clk_enable	= omap2_clkdm_clk_enable,
-	.clkdm_clk_disable	= omap2_clkdm_clk_disable,
+	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
 };
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d3..975f6bda0e0 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index b54427dec2a..ecaad7d371e 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -47,7 +47,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 80b7359500f..3615e0d9ee3 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
 			_enable_sysc(oh);
 		}
 	} else {
+		_omap4_disable_module(oh);
 		_disable_clocks(oh);
 		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
 			 oh->name, r);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index a1df9d4690f..b1675e6214d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
 
 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-	{ .name = "logic", .rst_shift = 0 },
-	{ .name = "seq0", .rst_shift = 1 },
-	{ .name = "seq1", .rst_shift = 2 },
+	{ .name = "logic", .rst_shift = 0, .st_shift = 8 },
+	{ .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+	{ .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };
 
 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
 	.rst_lines	= omap3xxx_iva_resets,
 	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_iva_resets),
 	.main_clk	= "iva2_ck",
+	.prcm = {
+		.omap2 = {
+			.module_offs = OMAP3430_IVA2_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+		}
+	},
 };
 
 /* timer class */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f033f950a23..f9bcb24cd51 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -4209,7 +4209,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };
 
 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
 	.master		= &omap44xx_dsp_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -4827,7 +4827,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };
 
 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
 	.master		= &omap44xx_iva_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -5361,7 +5361,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };
 
 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
 	.master		= &omap44xx_l3_main_2_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "l3_div_ck",
@@ -6031,7 +6031,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_abe__dmic,
 	&omap44xx_l4_abe__dmic_dma,
 	&omap44xx_dsp__iva,
-	&omap44xx_dsp__sl2if,
+	/* &omap44xx_dsp__sl2if, */
 	&omap44xx_l4_cfg__dsp,
 	&omap44xx_l3_main_2__dss,
 	&omap44xx_l4_per__dss,
@@ -6067,7 +6067,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_per__i2c4,
 	&omap44xx_l3_main_2__ipu,
 	&omap44xx_l3_main_2__iss,
-	&omap44xx_iva__sl2if,
+	/* &omap44xx_iva__sl2if, */
 	&omap44xx_l3_main_2__iva,
 	&omap44xx_l4_wkup__kbd,
 	&omap44xx_l4_cfg__mailbox,
@@ -6098,7 +6098,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__cm_core,
 	&omap44xx_l4_wkup__prm,
 	&omap44xx_l4_wkup__scrm,
-	&omap44xx_l3_main_2__sl2if,
+	/* &omap44xx_l3_main_2__sl2if, */
 	&omap44xx_l4_abe__slimbus1,
 	&omap44xx_l4_abe__slimbus1_dma,
 	&omap44xx_l4_per__slimbus2,
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e17cf974d16..5214d5bfba2 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -262,6 +262,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
 	return 0;
 }
 
+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -301,6 +302,12 @@ static int __init omap2_sync32k_clocksource_init(void)
 
 	return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+	return -ENODEV;
+}
+#endif
 
 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
 						const char *fck_source)
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index b3226f80c98..5f3c03b61f8 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -110,13 +110,6 @@ config TEGRA_DEBUG_UART_AUTO_SCRATCH
 
 endchoice
 
-config TEGRA_SYSTEM_DMA
-	bool "Enable system DMA driver for NVIDIA Tegra SoCs"
-	default y
-	help
-	  Adds system DMA functionality for NVIDIA Tegra SoCs, used by
-	  several Tegra device drivers
-
 config TEGRA_EMC_SCALING_ENABLE
 	bool "Enable scaling the memory frequency"
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 56065acbd81..0974ace4555 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC)		+= tegra30_clocks.o
 obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
 obj-$(CONFIG_SMP)                       += reset.o
 obj-$(CONFIG_HOTPLUG_CPU)               += hotplug.o
-obj-$(CONFIG_TEGRA_SYSTEM_DMA)		+= dma.o
 obj-$(CONFIG_CPU_FREQ)                  += cpu-tegra.o
 obj-$(CONFIG_TEGRA_PCI)			+= pcie.o
 obj-$(CONFIG_USB_SUPPORT)		+= usb_phy.o
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index 643a37809a1..b5015d0f191 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -28,7 +28,7 @@
 
 #include "apbio.h"
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
+#if defined(CONFIG_TEGRA20_APB_DMA)
 static DEFINE_MUTEX(tegra_apb_dma_lock);
 static u32 *tegra_apb_bb;
 static dma_addr_t tegra_apb_bb_phys;
@@ -37,121 +37,6 @@ static DECLARE_COMPLETION(tegra_apb_wait);
 static u32 tegra_apb_readl_direct(unsigned long offset);
 static void tegra_apb_writel_direct(u32 value, unsigned long offset);
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
-static struct tegra_dma_channel *tegra_apb_dma;
-
-bool tegra_apb_init(void)
-{
-	struct tegra_dma_channel *ch;
-
-	mutex_lock(&tegra_apb_dma_lock);
-
-	/* Check to see if we raced to setup */
-	if (tegra_apb_dma)
-		goto out;
-
-	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
-		TEGRA_DMA_SHARED);
-
-	if (!ch)
-		goto out_fail;
-
-	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
-		&tegra_apb_bb_phys, GFP_KERNEL);
-	if (!tegra_apb_bb) {
-		pr_err("%s: can not allocate bounce buffer\n", __func__);
-		tegra_dma_free_channel(ch);
-		goto out_fail;
-	}
-
-	tegra_apb_dma = ch;
-out:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return true;
-
-out_fail:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return false;
-}
-
-static void apb_dma_complete(struct tegra_dma_req *req)
-{
-	complete(&tegra_apb_wait);
-}
-
-static u32 tegra_apb_readl_using_dma(unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init())
-		return tegra_apb_readl_direct(offset);
-
-	mutex_lock(&tegra_apb_dma_lock);
-	req.complete = apb_dma_complete;
-	req.to_memory = 1;
-	req.dest_addr = tegra_apb_bb_phys;
-	req.dest_bus_width = 32;
-	req.dest_wrap = 1;
-	req.source_addr = offset;
-	req.source_bus_width = 32;
-	req.source_wrap = 4;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb read dma timed out")) {
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-		*(u32 *)tegra_apb_bb = 0;
-	}
-
-	mutex_unlock(&tegra_apb_dma_lock);
-	return *((u32 *)tegra_apb_bb);
-}
-
-static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init()) {
-		tegra_apb_writel_direct(value, offset);
-		return;
-	}
-
-	mutex_lock(&tegra_apb_dma_lock);
-	*((u32 *)tegra_apb_bb) = value;
-	req.complete = apb_dma_complete;
-	req.to_memory = 0;
-	req.dest_addr = offset;
-	req.dest_wrap = 4;
-	req.dest_bus_width = 32;
-	req.source_addr = tegra_apb_bb_phys;
-	req.source_bus_width = 32;
-	req.source_wrap = 1;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb write dma timed out"))
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-
-	mutex_unlock(&tegra_apb_dma_lock);
-}
-
-#else
 static struct dma_chan *tegra_apb_dma_chan;
 static struct dma_slave_config dma_sconfig;
 
@@ -279,7 +164,6 @@ static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
 		pr_err("error in writing offset 0x%08lx using dma\n", offset);
 	mutex_unlock(&tegra_apb_dma_lock);
 }
-#endif
 #else
 #define tegra_apb_readl_using_dma tegra_apb_readl_direct
 #define tegra_apb_writel_using_dma tegra_apb_writel_direct
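With the legacy driver gone, the surviving half of apbio.c is built on the generic dmaengine framework (the `tegra_apb_dma_chan` and `dma_slave_config` code kept above). As a reference for readers unfamiliar with that API, here is a minimal, illustrative sketch of how a dmaengine client requests a channel; the function name is hypothetical, and a real client would go on to configure the slave and submit descriptors:

```c
#include <linux/dmaengine.h>

/*
 * Minimal dmaengine client sketch (illustrative names only): request
 * any channel capable of slave transfers.  The retained apbio.c code
 * follows the same pattern and then issues single-word slave
 * transfers through the channel instead of the removed tegra_dma_*
 * calls.
 */
static struct dma_chan *example_get_slave_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* No filter function: accept the first DMA_SLAVE-capable channel */
	return dma_request_channel(mask, NULL, NULL);
}
```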
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
deleted file mode 100644
index 29c5114d607..00000000000
--- a/arch/arm/mach-tegra/dma.c
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
- * arch/arm/mach-tegra/dma.c
- *
- * System DMA driver for NVIDIA Tegra SoCs
- *
- * Copyright (c) 2008-2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <mach/dma.h>
-#include <mach/irqs.h>
-#include <mach/iomap.h>
-#include <mach/suspend.h>
-
-#include "apbio.h"
-
-#define APB_DMA_GEN				0x000
-#define GEN_ENABLE				(1<<31)
-
-#define APB_DMA_CNTRL				0x010
-
-#define APB_DMA_IRQ_MASK			0x01c
-
-#define APB_DMA_IRQ_MASK_SET			0x020
-
-#define APB_DMA_CHAN_CSR			0x000
-#define CSR_ENB					(1<<31)
-#define CSR_IE_EOC				(1<<30)
-#define CSR_HOLD				(1<<29)
-#define CSR_DIR					(1<<28)
-#define CSR_ONCE				(1<<27)
-#define CSR_FLOW				(1<<21)
-#define CSR_REQ_SEL_SHIFT			16
-#define CSR_WCOUNT_SHIFT			2
-#define CSR_WCOUNT_MASK				0xFFFC
-
-#define APB_DMA_CHAN_STA				0x004
-#define STA_BUSY				(1<<31)
-#define STA_ISE_EOC				(1<<30)
-#define STA_HALT				(1<<29)
-#define STA_PING_PONG				(1<<28)
-#define STA_COUNT_SHIFT				2
-#define STA_COUNT_MASK				0xFFFC
-
-#define APB_DMA_CHAN_AHB_PTR				0x010
-
-#define APB_DMA_CHAN_AHB_SEQ				0x014
-#define AHB_SEQ_INTR_ENB			(1<<31)
-#define AHB_SEQ_BUS_WIDTH_SHIFT			28
-#define AHB_SEQ_BUS_WIDTH_MASK			(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_8			(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_16			(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_32			(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_64			(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_128			(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_DATA_SWAP			(1<<27)
-#define AHB_SEQ_BURST_MASK			(0x7<<24)
-#define AHB_SEQ_BURST_1				(4<<24)
-#define AHB_SEQ_BURST_4				(5<<24)
-#define AHB_SEQ_BURST_8				(6<<24)
-#define AHB_SEQ_DBL_BUF				(1<<19)
-#define AHB_SEQ_WRAP_SHIFT			16
-#define AHB_SEQ_WRAP_MASK			(0x7<<AHB_SEQ_WRAP_SHIFT)
-
-#define APB_DMA_CHAN_APB_PTR				0x018
-
-#define APB_DMA_CHAN_APB_SEQ				0x01c
-#define APB_SEQ_BUS_WIDTH_SHIFT			28
-#define APB_SEQ_BUS_WIDTH_MASK			(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_8			(0<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_16			(1<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_32			(2<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_64			(3<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_128			(4<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_DATA_SWAP			(1<<27)
-#define APB_SEQ_WRAP_SHIFT			16
-#define APB_SEQ_WRAP_MASK			(0x7<<APB_SEQ_WRAP_SHIFT)
-
-#define TEGRA_SYSTEM_DMA_CH_NR			16
-#define TEGRA_SYSTEM_DMA_AVP_CH_NUM		4
-#define TEGRA_SYSTEM_DMA_CH_MIN			0
-#define TEGRA_SYSTEM_DMA_CH_MAX	\
-	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
-
-#define NV_DMA_MAX_TRASFER_SIZE 0x10000
-
-static const unsigned int ahb_addr_wrap_table[8] = {
-	0, 32, 64, 128, 256, 512, 1024, 2048
-};
-
-static const unsigned int apb_addr_wrap_table[8] = {
-	0, 1, 2, 4, 8, 16, 32, 64
-};
-
-static const unsigned int bus_width_table[5] = {
-	8, 16, 32, 64, 128
-};
-
-#define TEGRA_DMA_NAME_SIZE 16
-struct tegra_dma_channel {
-	struct list_head	list;
-	int			id;
-	spinlock_t		lock;
-	char			name[TEGRA_DMA_NAME_SIZE];
-	void  __iomem		*addr;
-	int			mode;
-	int			irq;
-	int			req_transfer_count;
-};
-
-#define  NV_DMA_MAX_CHANNELS  32
-
-static bool tegra_dma_initialized;
-static DEFINE_MUTEX(tegra_dma_lock);
-static DEFINE_SPINLOCK(enable_lock);
-
-static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
-static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
-
-static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-static void tegra_dma_stop(struct tegra_dma_channel *ch);
-
-void tegra_dma_flush(struct tegra_dma_channel *ch)
-{
-}
-EXPORT_SYMBOL(tegra_dma_flush);
-
-void tegra_dma_dequeue(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-
-	if (tegra_dma_is_empty(ch))
-		return;
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-
-	tegra_dma_dequeue_req(ch, req);
-	return;
-}
-
-static void tegra_dma_stop(struct tegra_dma_channel *ch)
-{
-	u32 csr;
-	u32 status;
-
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_IE_EOC;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	csr &= ~CSR_ENB;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
-		writel(status, ch->addr + APB_DMA_CHAN_STA);
-}
-
-static int tegra_dma_cancel(struct tegra_dma_channel *ch)
-{
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	while (!list_empty(&ch->list))
-		list_del(ch->list.next);
-
-	tegra_dma_stop(ch);
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return 0;
-}
-
-static unsigned int get_channel_status(struct tegra_dma_channel *ch,
-			struct tegra_dma_req *req, bool is_stop_dma)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	unsigned int status;
-
-	if (is_stop_dma) {
-		/*
-		 * STOP the DMA and get the transfer count.
-		 * Getting the transfer count is tricky.
-		 *  - Globally disable DMA on all channels
-		 *  - Read the channel's status register to know the number
-		 *    of pending bytes to be transfered.
-		 *  - Stop the dma channel
-		 *  - Globally re-enable DMA to resume other transfers
-		 */
-		spin_lock(&enable_lock);
-		writel(0, addr + APB_DMA_GEN);
-		udelay(20);
-		status = readl(ch->addr + APB_DMA_CHAN_STA);
-		tegra_dma_stop(ch);
-		writel(GEN_ENABLE, addr + APB_DMA_GEN);
-		spin_unlock(&enable_lock);
-		if (status & STA_ISE_EOC) {
-			pr_err("Got Dma Int here clearing");
-			writel(status, ch->addr + APB_DMA_CHAN_STA);
-		}
-		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
-	} else {
-		status = readl(ch->addr + APB_DMA_CHAN_STA);
-	}
-	return status;
-}
-
-/* should be called with the channel lock held */
-static unsigned int dma_active_count(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req, unsigned int status)
-{
-	unsigned int to_transfer;
-	unsigned int req_transfer_count;
-	unsigned int bytes_transferred;
-
-	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
-	req_transfer_count = ch->req_transfer_count + 1;
-	bytes_transferred = req_transfer_count;
-	if (status & STA_BUSY)
-		bytes_transferred -= to_transfer;
-	/*
-	 * In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			bytes_transferred += req_transfer_count;
-		if (status & STA_ISE_EOC)
-			bytes_transferred += req_transfer_count;
-	}
-	bytes_transferred *= 4;
-	return bytes_transferred;
-}
-
-int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *_req)
-{
-	unsigned int status;
-	struct tegra_dma_req *req = NULL;
-	int found = 0;
-	unsigned long irq_flags;
-	int stop = 0;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-
-	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
-		stop = 1;
-
-	list_for_each_entry(req, &ch->list, node) {
-		if (req == _req) {
-			list_del(&req->node);
-			found = 1;
-			break;
-		}
-	}
-	if (!found) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return 0;
-	}
-
-	if (!stop)
-		goto skip_stop_dma;
-
-	status = get_channel_status(ch, req, true);
-	req->bytes_transferred = dma_active_count(ch, req, status);
-
-	if (!list_empty(&ch->list)) {
-		/* if the list is not empty, queue the next request */
-		struct tegra_dma_req *next_req;
-		next_req = list_entry(ch->list.next,
-			typeof(*next_req), node);
-		tegra_dma_update_hw(ch, next_req);
-	}
-
-skip_stop_dma:
-	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-	/* Callback should be called without any lock */
-	req->complete(req);
-	return 0;
-}
-EXPORT_SYMBOL(tegra_dma_dequeue_req);
-
-bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
-{
-	unsigned long irq_flags;
-	bool is_empty;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list))
-		is_empty = true;
-	else
-		is_empty = false;
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return is_empty;
-}
-EXPORT_SYMBOL(tegra_dma_is_empty);
-
-bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *_req)
-{
-	unsigned long irq_flags;
-	struct tegra_dma_req *req;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	list_for_each_entry(req, &ch->list, node) {
-		if (req == _req) {
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			return true;
-		}
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return false;
-}
-EXPORT_SYMBOL(tegra_dma_is_req_inflight);
-
-int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	unsigned long irq_flags;
-	struct tegra_dma_req *_req;
-	int start_dma = 0;
-
-	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
-		req->source_addr & 0x3 || req->dest_addr & 0x3) {
-		pr_err("Invalid DMA request for channel %d\n", ch->id);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-
-	list_for_each_entry(_req, &ch->list, node) {
-		if (req == _req) {
-		    spin_unlock_irqrestore(&ch->lock, irq_flags);
-		    return -EEXIST;
-		}
-	}
-
-	req->bytes_transferred = 0;
-	req->status = 0;
-	req->buffer_status = 0;
-	if (list_empty(&ch->list))
-		start_dma = 1;
-
-	list_add_tail(&req->node, &ch->list);
-
-	if (start_dma)
-		tegra_dma_update_hw(ch, req);
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(tegra_dma_enqueue_req);
-
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
-{
-	int channel;
-	struct tegra_dma_channel *ch = NULL;
-
-	if (!tegra_dma_initialized)
-		return NULL;
-
-	mutex_lock(&tegra_dma_lock);
-
-	/* first channel is the shared channel */
-	if (mode & TEGRA_DMA_SHARED) {
-		channel = TEGRA_SYSTEM_DMA_CH_MIN;
-	} else {
-		channel = find_first_zero_bit(channel_usage,
-			ARRAY_SIZE(dma_channels));
-		if (channel >= ARRAY_SIZE(dma_channels))
-			goto out;
-	}
-	__set_bit(channel, channel_usage);
-	ch = &dma_channels[channel];
-	ch->mode = mode;
-
-out:
-	mutex_unlock(&tegra_dma_lock);
-	return ch;
-}
-EXPORT_SYMBOL(tegra_dma_allocate_channel);
-
-void tegra_dma_free_channel(struct tegra_dma_channel *ch)
-{
-	if (ch->mode & TEGRA_DMA_SHARED)
-		return;
-	tegra_dma_cancel(ch);
-	mutex_lock(&tegra_dma_lock);
-	__clear_bit(ch->id, channel_usage);
-	mutex_unlock(&tegra_dma_lock);
-}
-EXPORT_SYMBOL(tegra_dma_free_channel);
-
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	u32 apb_ptr;
-	u32 ahb_ptr;
-
-	if (req->to_memory) {
-		apb_ptr = req->source_addr;
-		ahb_ptr = req->dest_addr;
-	} else {
-		apb_ptr = req->dest_addr;
-		ahb_ptr = req->source_addr;
-	}
-	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
-
-	req->status = TEGRA_DMA_REQ_INFLIGHT;
-	return;
-}
-
-static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	int ahb_addr_wrap;
-	int apb_addr_wrap;
-	int ahb_bus_width;
-	int apb_bus_width;
-	int index;
-
-	u32 ahb_seq;
-	u32 apb_seq;
-	u32 ahb_ptr;
-	u32 apb_ptr;
-	u32 csr;
-
-	csr = CSR_IE_EOC | CSR_FLOW;
-	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
-	apb_seq = 0;
-
-	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
-
-	/* One shot mode is always single buffered,
-	 * continuous mode is always double buffered
-	 * */
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
-		csr |= CSR_ONCE;
-		ch->req_transfer_count = (req->size >> 2) - 1;
-	} else {
-		ahb_seq |= AHB_SEQ_DBL_BUF;
-
-		/* In double buffered mode, we set the size to half the
-		 * requested size and interrupt when half the buffer
-		 * is full */
-		ch->req_transfer_count = (req->size >> 3) - 1;
-	}
-
-	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
-
-	if (req->to_memory) {
-		apb_ptr = req->source_addr;
-		ahb_ptr = req->dest_addr;
-
-		apb_addr_wrap = req->source_wrap;
-		ahb_addr_wrap = req->dest_wrap;
-		apb_bus_width = req->source_bus_width;
-		ahb_bus_width = req->dest_bus_width;
-
-	} else {
-		csr |= CSR_DIR;
-		apb_ptr = req->dest_addr;
-		ahb_ptr = req->source_addr;
-
-		apb_addr_wrap = req->dest_wrap;
-		ahb_addr_wrap = req->source_wrap;
-		apb_bus_width = req->dest_bus_width;
-		ahb_bus_width = req->source_bus_width;
-	}
-
-	apb_addr_wrap >>= 2;
-	ahb_addr_wrap >>= 2;
-
-	/* set address wrap for APB size */
-	index = 0;
-	do  {
-		if (apb_addr_wrap_table[index] == apb_addr_wrap)
-			break;
-		index++;
-	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
-	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
-	apb_seq |= index << APB_SEQ_WRAP_SHIFT;
-
-	/* set address wrap for AHB size */
-	index = 0;
-	do  {
-		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
-			break;
-		index++;
-	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
-	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
-	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
-
-	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
-		if (bus_width_table[index] == ahb_bus_width)
-			break;
-	}
-	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
-
-	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
-		if (bus_width_table[index] == apb_bus_width)
-			break;
-	}
-	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
-	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
-	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
-
-	csr |= CSR_ENB;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	req->status = TEGRA_DMA_REQ_INFLIGHT;
-}
-
-static void handle_oneshot_dma(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return;
-	}
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		int bytes_transferred;
-
-		bytes_transferred = ch->req_transfer_count;
-		bytes_transferred += 1;
-		bytes_transferred <<= 2;
-
-		list_del(&req->node);
-		req->bytes_transferred = bytes_transferred;
-		req->status = TEGRA_DMA_REQ_SUCCESS;
-
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		/* Callback should be called without any lock */
-		pr_debug("%s: transferred %d bytes\n", __func__,
-			req->bytes_transferred);
-		req->complete(req);
-		spin_lock_irqsave(&ch->lock, irq_flags);
-	}
-
-	if (!list_empty(&ch->list)) {
-		req = list_entry(ch->list.next, typeof(*req), node);
-		/* the complete function we just called may have enqueued
-		   another req, in which case dma has already started */
-		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
-			tegra_dma_update_hw(ch, req);
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-}
-
-static void handle_continuous_dma(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return;
-	}
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
-			bool is_dma_ping_complete;
-			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
-						& STA_PING_PONG) ? true : false;
-			if (req->to_memory)
-				is_dma_ping_complete = !is_dma_ping_complete;
-			/* Out of sync - Release current buffer */
-			if (!is_dma_ping_complete) {
-				int bytes_transferred;
-
-				bytes_transferred = ch->req_transfer_count;
-				bytes_transferred += 1;
-				bytes_transferred <<= 3;
-				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-				req->bytes_transferred = bytes_transferred;
-				req->status = TEGRA_DMA_REQ_SUCCESS;
-				tegra_dma_stop(ch);
-
-				if (!list_is_last(&req->node, &ch->list)) {
-					struct tegra_dma_req *next_req;
-
-					next_req = list_entry(req->node.next,
-						typeof(*next_req), node);
-					tegra_dma_update_hw(ch, next_req);
-				}
-
-				list_del(&req->node);
-
-				/* DMA lock is NOT held when callbak is called */
-				spin_unlock_irqrestore(&ch->lock, irq_flags);
-				req->complete(req);
-				return;
-			}
-			/* Load the next request into the hardware, if available
-			 * */
-			if (!list_is_last(&req->node, &ch->list)) {
-				struct tegra_dma_req *next_req;
-
-				next_req = list_entry(req->node.next,
-					typeof(*next_req), node);
-				tegra_dma_update_hw_partial(ch, next_req);
-			}
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			/* DMA lock is NOT held when callback is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			if (likely(req->threshold))
-				req->threshold(req);
-			return;
-
-		} else if (req->buffer_status ==
-			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
-			/* Callback when the buffer is completely full (i.e on
-			 * the second  interrupt */
-			int bytes_transferred;
-
-			bytes_transferred = ch->req_transfer_count;
-			bytes_transferred += 1;
-			bytes_transferred <<= 3;
-
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-			req->bytes_transferred = bytes_transferred;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			list_del(&req->node);
-
-			/* DMA lock is NOT held when callbak is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			req->complete(req);
-			return;
-
-		} else {
-			BUG();
-		}
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-}
-
-static irqreturn_t dma_isr(int irq, void *data)
-{
-	struct tegra_dma_channel *ch = data;
-	unsigned long status;
-
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
-		writel(status, ch->addr + APB_DMA_CHAN_STA);
-	else {
-		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
-		return IRQ_HANDLED;
-	}
-	return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t dma_thread_fn(int irq, void *data)
-{
-	struct tegra_dma_channel *ch = data;
-
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
-		handle_oneshot_dma(ch);
-	else
-		handle_continuous_dma(ch);
-
-
-	return IRQ_HANDLED;
-}
-
-int __init tegra_dma_init(void)
-{
-	int ret = 0;
-	int i;
-	unsigned int irq;
-	void __iomem *addr;
-	struct clk *c;
-
-	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
-
-	c = clk_get_sys("tegra-apbdma", NULL);
-	if (IS_ERR(c)) {
-		pr_err("Unable to get clock for APB DMA\n");
-		ret = PTR_ERR(c);
-		goto fail;
-	}
-	ret = clk_prepare_enable(c);
-	if (ret != 0) {
-		pr_err("Unable to enable clock for APB DMA\n");
-		goto fail;
-	}
-
-	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	writel(GEN_ENABLE, addr + APB_DMA_GEN);
-	writel(0, addr + APB_DMA_CNTRL);
-	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
-	       addr + APB_DMA_IRQ_MASK_SET);
-
-	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
-		struct tegra_dma_channel *ch = &dma_channels[i];
-
-		ch->id = i;
-		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
-
-		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-			TEGRA_APB_DMA_CH0_SIZE * i);
-
-		spin_lock_init(&ch->lock);
-		INIT_LIST_HEAD(&ch->list);
-
-		irq = INT_APB_DMA_CH0 + i;
-		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
-			dma_channels[i].name, ch);
-		if (ret) {
-			pr_err("Failed to register IRQ %d for DMA %d\n",
-				irq, i);
-			goto fail;
-		}
-		ch->irq = irq;
-
-		__clear_bit(i, channel_usage);
-	}
-	/* mark the shared channel allocated */
-	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
-
-	tegra_dma_initialized = true;
-
-	return 0;
-fail:
-	writel(0, addr + APB_DMA_GEN);
-	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
-		struct tegra_dma_channel *ch = &dma_channels[i];
-		if (ch->irq)
-			free_irq(ch->irq, ch);
-	}
-	return ret;
-}
-postcore_initcall(tegra_dma_init);
-
-#ifdef CONFIG_PM
-static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
-
-void tegra_dma_suspend(void)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	u32 *ctx = apb_dma;
-	int i;
-
-	*ctx++ = readl(addr + APB_DMA_GEN);
-	*ctx++ = readl(addr + APB_DMA_CNTRL);
-	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);
-
-	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-				  TEGRA_APB_DMA_CH0_SIZE * i);
-
-		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
-		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
-	}
-}
-
-void tegra_dma_resume(void)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	u32 *ctx = apb_dma;
-	int i;
-
-	writel(*ctx++, addr + APB_DMA_GEN);
-	writel(*ctx++, addr + APB_DMA_CNTRL);
-	writel(*ctx++, addr + APB_DMA_IRQ_MASK);
-
-	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-				  TEGRA_APB_DMA_CH0_SIZE * i);
-
-		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
-		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
-		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
-		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
-		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
-	}
-}
-
-#endif
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index 9077092812c..3081cc6dda3 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -51,101 +51,4 @@
 #define TEGRA_DMA_REQ_SEL_OWR			25
 #define TEGRA_DMA_REQ_SEL_INVALID		31
 
-struct tegra_dma_req;
-struct tegra_dma_channel;
-
-enum tegra_dma_mode {
-	TEGRA_DMA_SHARED = 1,
-	TEGRA_DMA_MODE_CONTINOUS = 2,
-	TEGRA_DMA_MODE_ONESHOT = 4,
-};
-
-enum tegra_dma_req_error {
-	TEGRA_DMA_REQ_SUCCESS = 0,
-	TEGRA_DMA_REQ_ERROR_ABORTED,
-	TEGRA_DMA_REQ_INFLIGHT,
-};
-
-enum tegra_dma_req_buff_status {
-	TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
-	TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
-	TEGRA_DMA_REQ_BUF_STATUS_FULL,
-};
-
-struct tegra_dma_req {
-	struct list_head node;
-	unsigned int modid;
-	int instance;
-
-	/* Called when the req is complete and from the DMA ISR context.
-	 * When this is called the req structure is no longer queued by
-	 * the DMA channel.
-	 *
-	 * State of the DMA depends on the number of req it has. If there are
-	 * no DMA requests queued up, then it will STOP the DMA. It there are
-	 * more requests in the DMA, then it will queue the next request.
-	 */
-	void (*complete)(struct tegra_dma_req *req);
-
-	/*  This is a called from the DMA ISR context when the DMA is still in
-	 *  progress and is actively filling same buffer.
-	 *
-	 *  In case of continuous mode receive, this threshold is 1/2 the buffer
-	 *  size. In other cases, this will not even be called as there is no
-	 *  hardware support for it.
-	 *
-	 * In the case of continuous mode receive, if there is next req already
-	 * queued, DMA programs the HW to use that req when this req is
-	 * completed. If there is no "next req" queued, then DMA ISR doesn't do
-	 * anything before calling this callback.
-	 *
-	 *	This is mainly used by the cases, where the clients has queued
-	 *	only one req and want to get some sort of DMA threshold
-	 *	callback to program the next buffer.
-	 *
-	 */
-	void (*threshold)(struct tegra_dma_req *req);
-
-	/* 1 to copy to memory.
-	 * 0 to copy from the memory to device FIFO */
-	int to_memory;
-
-	void *virt_addr;
-
-	unsigned long source_addr;
-	unsigned long dest_addr;
-	unsigned long dest_wrap;
-	unsigned long source_wrap;
-	unsigned long source_bus_width;
-	unsigned long dest_bus_width;
-	unsigned long req_sel;
-	unsigned int size;
-
-	/* Updated by the DMA driver on the conpletion of the request. */
-	int bytes_transferred;
-	int status;
-
-	/* DMA completion tracking information */
-	int buffer_status;
-
-	/* Client specific data */
-	void *dev;
-};
-
-int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-void tegra_dma_dequeue(struct tegra_dma_channel *ch);
-void tegra_dma_flush(struct tegra_dma_channel *ch);
-
-bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
-
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
-void tegra_dma_free_channel(struct tegra_dma_channel *ch);
-
-int __init tegra_dma_init(void);
-
 #endif
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93..4e07eec1270 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	pid = task_pid_nr(thread->task) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
 	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
 	isb();
 
 	return NOTIFY_OK;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 051204fc461..e59c4ab71bc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size)
 	void *pool_start = pool->vaddr;
 	void *pool_end = pool->vaddr + pool->size;
 
-	if (start < pool_start || start > pool_end)
+	if (start < pool_start || start >= pool_end)
 		return false;
 
 	if (end <= pool_end)
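The one-character change in `__in_atomic_pool()` above makes the check treat the pool as a half-open interval: `pool_end` is the first byte past the pool, so a pointer equal to it must be rejected. An illustrative, non-kernel helper showing the corrected comparison:

```c
/*
 * Illustrative only: the pool occupies the half-open interval
 * [pool_start, pool_end), so pool_end itself is the first address
 * past the pool and must test as outside it.
 */
static int ptr_in_pool(const char *p, const char *pool_start,
		       const char *pool_end)
{
	return p >= pool_start && p < pool_end;	/* '<', not '<=' */
}
```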
*/ -		if (__va(bank->start + bank->size) > vmalloc_min || -		    __va(bank->start + bank->size) < __va(bank->start)) { +		if (__va(bank->start + bank->size - 1) >= vmalloc_min || +		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {  			unsigned long newsize = vmalloc_min - __va(bank->start);  			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "  			       "to -%.8llx (vmalloc region overlap).\n", diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index d861aa73299..28acb383e7d 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -67,6 +67,7 @@  static unsigned long omap_sram_start;  static void __iomem *omap_sram_base; +static unsigned long omap_sram_skip;  static unsigned long omap_sram_size;  static void __iomem *omap_sram_ceil; @@ -105,6 +106,7 @@ static int is_sram_locked(void)   */  static void __init omap_detect_sram(void)  { +	omap_sram_skip = SRAM_BOOTLOADER_SZ;  	if (cpu_class_is_omap2()) {  		if (is_sram_locked()) {  			if (cpu_is_omap34xx()) { @@ -112,6 +114,7 @@ static void __init omap_detect_sram(void)  				if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||  				    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {  					omap_sram_size = 0x7000; /* 28K */ +					omap_sram_skip += SZ_16K;  				} else {  					omap_sram_size = 0x8000; /* 32K */  				} @@ -174,8 +177,10 @@ static void __init omap_map_sram(void)  		return;  #ifdef CONFIG_OMAP4_ERRATA_I688 +	if (cpu_is_omap44xx()) {  		omap_sram_start += PAGE_SIZE;  		omap_sram_size -= SZ_16K; +	}  #endif  	if (cpu_is_omap34xx()) {  		/* @@ -202,8 +207,8 @@ static void __init omap_map_sram(void)  	 * Looks like we need to preserve some bootloader code at the  	 * beginning of SRAM for jumping to flash for reboot to work...  	 */ -	memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0, -		  omap_sram_size - SRAM_BOOTLOADER_SZ); +	memset_io(omap_sram_base + omap_sram_skip, 0, +		  omap_sram_size - omap_sram_skip);  }  /* @@ -217,7 +222,7 @@ void *omap_sram_push_address(unsigned long size)  {  	unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; -	available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); +	available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);  	if (size > available) {  		pr_err("Not enough space in SRAM\n"); diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index f3486192063..c7092e6057c 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -38,6 +38,7 @@ config BLACKFIN  	select GENERIC_ATOMIC64  	select GENERIC_IRQ_PROBE  	select IRQ_PER_CPU if SMP +	select USE_GENERIC_SMP_HELPERS if SMP  	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG  	select GENERIC_SMP_IDLE_THREAD  	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index d3d7e64ca96..66cf00095b8 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile @@ -20,7 +20,6 @@ endif  KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)  KBUILD_CFLAGS_MODULE    += -mlong-calls  LDFLAGS                 += -m elf32bfin -KALLSYMS         += --symbol-prefix=_  KBUILD_DEFCONFIG := BF537-STAMP_defconfig diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h index dc3d144b4bb..9631598dcc5 100644 --- a/arch/blackfin/include/asm/smp.h +++ b/arch/blackfin/include/asm/smp.h @@ -18,6 +18,8 @@  #define raw_smp_processor_id()  blackfin_core_id()  extern void bfin_relocate_coreb_l1_mem(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const 
struct cpumask *mask);  #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)  asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 00bbe672b3b..a40151306b7 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];  struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; -#define BFIN_IPI_TIMER	      0 -#define BFIN_IPI_RESCHEDULE   1 -#define BFIN_IPI_CALL_FUNC    2 -#define BFIN_IPI_CPU_STOP     3 +enum ipi_message_type { +	BFIN_IPI_TIMER, +	BFIN_IPI_RESCHEDULE, +	BFIN_IPI_CALL_FUNC, +	BFIN_IPI_CALL_FUNC_SINGLE, +	BFIN_IPI_CPU_STOP, +};  struct blackfin_flush_data {  	unsigned long start; @@ -60,35 +63,20 @@ struct blackfin_flush_data {  void *secondary_stack; - -struct smp_call_struct { -	void (*func)(void *info); -	void *info; -	int wait; -	cpumask_t *waitmask; -}; -  static struct blackfin_flush_data smp_flush_data;  static DEFINE_SPINLOCK(stop_lock); -struct ipi_message { -	unsigned long type; -	struct smp_call_struct call_struct; -}; -  /* A magic number - stress test shows this is safe for common cases */  #define BFIN_IPI_MSGQ_LEN 5  /* Simple FIFO buffer, overflow leads to panic */ -struct ipi_message_queue { -	spinlock_t lock; +struct ipi_data {  	unsigned long count; -	unsigned long head; /* head of the queue */ -	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN]; +	unsigned long bits;  }; -static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); +static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);  static void ipi_cpu_stop(unsigned int cpu)  { @@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)  	blackfin_icache_flush_range(fdata->start, fdata->end);  } -static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) -{ -	int wait; -	void (*func)(void *info); -	void *info; -	func = msg->call_struct.func; -	info = msg->call_struct.info; -	wait = msg->call_struct.wait; -	func(info); -	if (wait) { -#ifdef __ARCH_SYNC_CORE_DCACHE -		/* -		 * 'wait' usually means synchronization between CPUs. -		 * Invalidate D cache in case shared data was changed -		 * by func() to ensure cache coherence. -		 */ -		resync_core_dcache(); -#endif -		cpumask_clear_cpu(cpu, msg->call_struct.waitmask); -	} -} -  /* Use IRQ_SUPPLE_0 to request reschedule.   
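 * (Editorial note, not part of the patch: the rework below replaces the
 * spinlocked per-CPU message FIFO with a single per-CPU bit mask; senders
 * set_bit() the message type, the handler drains the word with xchg(),
 * and the old BFIN_IPI_MSGQ_LEN overflow panic disappears.)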
* When returning from interrupt to user space,   * there is chance to reschedule */ @@ -172,152 +138,95 @@ void ipi_timer(void)  static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)  { -	struct ipi_message *msg; -	struct ipi_message_queue *msg_queue; +	struct ipi_data *bfin_ipi_data;  	unsigned int cpu = smp_processor_id(); -	unsigned long flags; +	unsigned long pending; +	unsigned long msg;  	platform_clear_ipi(cpu, IRQ_SUPPLE_1); -	msg_queue = &__get_cpu_var(ipi_msg_queue); +	bfin_ipi_data = &__get_cpu_var(bfin_ipi); + +	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { +		msg = 0; +		do { +			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); +			switch (msg) { +			case BFIN_IPI_TIMER: +				ipi_timer(); +				break; +			case BFIN_IPI_RESCHEDULE: +				scheduler_ipi(); +				break; +			case BFIN_IPI_CALL_FUNC: +				generic_smp_call_function_interrupt(); +				break; + +			case BFIN_IPI_CALL_FUNC_SINGLE: +				generic_smp_call_function_single_interrupt(); +				break; -	spin_lock_irqsave(&msg_queue->lock, flags); +			case BFIN_IPI_CPU_STOP: +				ipi_cpu_stop(cpu); +				break; +			} +		} while (msg < BITS_PER_LONG); -	while (msg_queue->count) { -		msg = &msg_queue->ipi_message[msg_queue->head]; -		switch (msg->type) { -		case BFIN_IPI_TIMER: -			ipi_timer(); -			break; -		case BFIN_IPI_RESCHEDULE: -			scheduler_ipi(); -			break; -		case BFIN_IPI_CALL_FUNC: -			ipi_call_function(cpu, msg); -			break; -		case BFIN_IPI_CPU_STOP: -			ipi_cpu_stop(cpu); -			break; -		default: -			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", -			       cpu, msg->type); -			break; -		} -		msg_queue->head++; -		msg_queue->head %= BFIN_IPI_MSGQ_LEN; -		msg_queue->count--; +		smp_mb();  	} -	spin_unlock_irqrestore(&msg_queue->lock, flags);  	return IRQ_HANDLED;  } -static void ipi_queue_init(void) +static void bfin_ipi_init(void)  {  	unsigned int cpu; -	struct ipi_message_queue *msg_queue; +	struct ipi_data *bfin_ipi_data;  	for_each_possible_cpu(cpu) { -		msg_queue = &per_cpu(ipi_msg_queue, cpu); -		spin_lock_init(&msg_queue->lock); -		msg_queue->count = 0; -		msg_queue->head = 0; +		bfin_ipi_data = &per_cpu(bfin_ipi, cpu); +		bfin_ipi_data->bits = 0; +		bfin_ipi_data->count = 0;  	}  } -static inline void smp_send_message(cpumask_t callmap, unsigned long type, -					void (*func) (void *info), void *info, int wait) +void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)  {  	unsigned int cpu; -	struct ipi_message_queue *msg_queue; -	struct ipi_message *msg; -	unsigned long flags, next_msg; -	cpumask_t waitmask; /* waitmask is shared by all cpus */ +	struct ipi_data *bfin_ipi_data; +	unsigned long flags; -	cpumask_copy(&waitmask, &callmap); -	for_each_cpu(cpu, &callmap) { -		msg_queue = &per_cpu(ipi_msg_queue, cpu); -		spin_lock_irqsave(&msg_queue->lock, flags); -		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { -			next_msg = (msg_queue->head + msg_queue->count) -					% BFIN_IPI_MSGQ_LEN; -			msg = &msg_queue->ipi_message[next_msg]; -			msg->type = type; -			if (type == BFIN_IPI_CALL_FUNC) { -				msg->call_struct.func = func; -				msg->call_struct.info = info; -				msg->call_struct.wait = wait; -				msg->call_struct.waitmask = &waitmask; -			} -			msg_queue->count++; -		} else -			panic("IPI message queue overflow\n"); -		spin_unlock_irqrestore(&msg_queue->lock, flags); +	local_irq_save(flags); + +	for_each_cpu(cpu, cpumask) { +		bfin_ipi_data = &per_cpu(bfin_ipi, cpu); +		smp_mb(); +		set_bit(msg, &bfin_ipi_data->bits); +		bfin_ipi_data->count++;  		
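		/* (Editorial note, added: set_bit() publishes the message type
		 * before the IPI below is raised, and the receiving handler
		 * drains every pending bit with xchg(), so multiple senders
		 * can coalesce onto a single interrupt with no queue left
		 * to overflow.) */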
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);  	} -	if (wait) { -		while (!cpumask_empty(&waitmask)) -			blackfin_dcache_invalidate_range( -				(unsigned long)(&waitmask), -				(unsigned long)(&waitmask)); -#ifdef __ARCH_SYNC_CORE_DCACHE -		/* -		 * Invalidate D cache in case shared data was changed by -		 * other processors to ensure cache coherence. -		 */ -		resync_core_dcache(); -#endif -	} +	local_irq_restore(flags);  } -int smp_call_function(void (*func)(void *info), void *info, int wait) +void arch_send_call_function_single_ipi(int cpu)  { -	cpumask_t callmap; - -	preempt_disable(); -	cpumask_copy(&callmap, cpu_online_mask); -	cpumask_clear_cpu(smp_processor_id(), &callmap); -	if (!cpumask_empty(&callmap)) -		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - -	preempt_enable(); - -	return 0; +	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);  } -EXPORT_SYMBOL_GPL(smp_call_function); -int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, -				int wait) +void arch_send_call_function_ipi_mask(const struct cpumask *mask)  { -	unsigned int cpu = cpuid; -	cpumask_t callmap; - -	if (cpu_is_offline(cpu)) -		return 0; -	cpumask_clear(&callmap); -	cpumask_set_cpu(cpu, &callmap); - -	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - -	return 0; +	send_ipi(mask, BFIN_IPI_CALL_FUNC);  } -EXPORT_SYMBOL_GPL(smp_call_function_single);  void smp_send_reschedule(int cpu)  { -	cpumask_t callmap; -	/* simply trigger an ipi */ - -	cpumask_clear(&callmap); -	cpumask_set_cpu(cpu, &callmap); - -	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); +	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);  	return;  }  void smp_send_msg(const struct cpumask *mask, unsigned long type)  { -	smp_send_message(*mask, type, NULL, NULL, 0); +	send_ipi(mask, type);  }  void smp_timer_broadcast(const struct cpumask *mask) @@ -333,7 +242,7 @@ void smp_send_stop(void)  	cpumask_copy(&callmap, cpu_online_mask);  	cpumask_clear_cpu(smp_processor_id(), &callmap);  	if (!cpumask_empty(&callmap)) -		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); +		send_ipi(&callmap, BFIN_IPI_CPU_STOP);  	preempt_enable(); @@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)  void __init smp_prepare_cpus(unsigned int max_cpus)  {  	platform_prepare_cpus(max_cpus); -	ipi_queue_init(); +	bfin_ipi_init();  	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);  	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);  } diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index a1e9d69a9c9..584b93674ea 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,  	if (*offset)  		return -EINVAL;  	retval = oprofilefs_ulong_from_user(&val, buf, count); -	if (retval) +	if (retval <= 0)  		return retval;  	if (val < oprofile_min_interval)  		oprofile_hw_interval = oprofile_min_interval; @@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,  		return -EINVAL;  	retval = oprofilefs_ulong_from_user(&val, buf, count); -	if (retval) +	if (retval <= 0)  		return retval;  	if (val != 0)  		return -EINVAL; @@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,  		return -EINVAL;  	retval = oprofilefs_ulong_from_user(&val, buf, count); -	if (retval) +	if (retval <= 0)  		return retval;  	if (val != 0 && val != 1) @@ -278,7 +278,7 @@ static ssize_t 
hwsampler_user_write(struct file *file, char const __user *buf,  		return -EINVAL;  	retval = oprofilefs_ulong_from_user(&val, buf, count); -	if (retval) +	if (retval <= 0)  		return retval;  	if (val != 0 && val != 1) @@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,  		return -EINVAL;  	retval = oprofilefs_ulong_from_user(&val, buf, count); -	if (retval) +	if (retval <= 0)  		return retval;  	if (val != 0 && val != 1) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7f2739e03e7..0d3d63afa76 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)  		break;  	case 28: /* Atom */ +	case 54: /* Cedariew */  		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,  		       sizeof(hw_cache_event_ids)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 520b4265fcd..da02e9cc375 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)  	 * to have an operational LBR which can freeze  	 * on PMU interrupt  	 */ -	if (boot_cpu_data.x86_mask < 10) { +	if (boot_cpu_data.x86_model == 28 +	    && boot_cpu_data.x86_mask < 10) {  		pr_cont("LBR disabled due to erratum");  		return;  	} diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 4873e62db6a..9e5bcf1e237 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,  	if (do_microcode_update(buf, len) == 0)  		ret = (ssize_t)len; +	if (ret > 0) +		perf_check_microcode(); +  	mutex_unlock(&microcode_mutex);  	put_online_cpus(); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index e498b18f010..9fc9aa7ac70 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)  		if (val & 0x10) {  			u8 edge_irr = s->irr & ~s->elcr;  			int i; -			bool found; +			bool found = false;  			struct kvm_vcpu *vcpu;  			s->init4 = val & 1; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c00f03de1b7..b1eb202ee76 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3619,6 +3619,7 @@ static void seg_setup(int seg)  static int alloc_apic_access_page(struct kvm *kvm)  { +	struct page *page;  	struct kvm_userspace_memory_region kvm_userspace_mem;  	int r = 0; @@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)  	if (r)  		goto out; -	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); +	page = gfn_to_page(kvm, 0xfee00); +	if (is_error_page(page)) { +		r = -EFAULT; +		goto out; +	} + +	kvm->arch.apic_access_page = page;  out:  	mutex_unlock(&kvm->slots_lock);  	return r; @@ -3641,6 +3648,7 @@ out:  static int alloc_identity_pagetable(struct kvm *kvm)  { +	struct page *page;  	struct kvm_userspace_memory_region kvm_userspace_mem;  	int r = 0; @@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)  	if (r)  		goto out; -	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, -			kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); +	page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); +	if (is_error_page(page)) { +		r = -EFAULT; +		goto out; +	} + +	kvm->arch.ept_identity_pagetable = page;  out:
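	/* (Editorial note, added: both allocators above now check
	 * is_error_page() before caching the result of gfn_to_page(),
	 * rather than storing an error cookie that would only blow up
	 * when the page is finally touched.) */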
mutex_unlock(&kvm->slots_lock);  	return r; @@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)  	/* Exposing INVPCID only when PCID is exposed */  	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);  	if (vmx_invpcid_supported() && -	    best && (best->ecx & bit(X86_FEATURE_INVPCID)) && +	    best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&  	    guest_cpuid_has_pcid(vcpu)) {  		exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;  		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, @@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)  		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,  			     exec_control);  		if (best) -			best->ecx &= ~bit(X86_FEATURE_INVPCID); +			best->ebx &= ~bit(X86_FEATURE_INVPCID);  	} } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 148ed666e31..2966c847d48 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)  			!kvm_event_needs_reinjection(vcpu);  } -static void vapic_enter(struct kvm_vcpu *vcpu) +static int vapic_enter(struct kvm_vcpu *vcpu)  {  	struct kvm_lapic *apic = vcpu->arch.apic;  	struct page *page;  	if (!apic || !apic->vapic_addr) -		return; +		return 0;  	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); +	if (is_error_page(page)) +		return -EFAULT;  	vcpu->arch.apic->vapic_page = page; +	return 0;  }  static void vapic_exit(struct kvm_vcpu *vcpu) @@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)  	}  	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); -	vapic_enter(vcpu); +	r = vapic_enter(vcpu); +	if (r) { +		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); +		return r; +	}  	r = 1;  	while (r > 0) {
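
(Editorial note: the KVM hunks above share one shape — gfn_to_page() is no
longer trusted to succeed, its result is vetted with is_error_page() before
being cached, and the caller unwinds whatever it acquired on the way in, here
the SRCU read lock. A minimal userspace sketch of that shape follows; the
names are illustrative stand-ins, not the KVM API.)

	#include <errno.h>
	#include <stdio.h>

	struct page { int id; };                  /* stand-in for struct page */

	static struct page *cached_page;

	/* Stand-in lookup: NULL plays the role of gfn_to_page()'s error
	 * cookie, which the real code detects with is_error_page(). */
	static struct page *lookup_page(unsigned long gfn)
	{
		static struct page good = { .id = 1 };
		return gfn == 0xfee00 ? &good : NULL;
	}

	static int is_error_page(const struct page *p)
	{
		return p == NULL;
	}

	/* vapic_enter()-style: return an errno instead of caching a bad
	 * pointer that would only fault when dereferenced much later. */
	static int map_page(unsigned long gfn)
	{
		struct page *page = lookup_page(gfn);

		if (is_error_page(page))
			return -EFAULT;
		cached_page = page;
		return 0;
	}

	/* __vcpu_run()-style caller: release what was taken before the
	 * call, mirroring the srcu_read_unlock() on the failure path. */
	static int run(unsigned long gfn)
	{
		int r = map_page(gfn);    /* pretend a lock is held here */

		if (r) {
			/* unlock would go here */
			fprintf(stderr, "map failed: %d\n", r);
			return r;
		}
		printf("mapped page %d\n", cached_page->id);
		/* unlock would go here */
		return 0;
	}

	int main(void)
	{
		run(0xfee00);                 /* succeeds */
		return run(0xdead) ? 1 : 0;   /* fails with -EFAULT */
	}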