268 files changed, 3090 insertions, 2963 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 363e348bff9..6c723811c0a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.  			real-time workloads.  It can also improve energy  			efficiency for asymmetric multiprocessors. -	rcu_nocbs_poll	[KNL,BOOT] +	rcu_nocb_poll	[KNL,BOOT]  			Rather than requiring that offloaded CPUs  			(specified by rcu_nocbs= above) explicitly  			awaken the corresponding "rcuoN" kthreads, diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index 3edb4c2887a..e540fd67f76 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt @@ -57,7 +57,7 @@ Protocol 2.10:	(Kernel 2.6.31) Added a protocol for relaxed alignment  Protocol 2.11:	(Kernel 3.6) Added a field for offset of EFI handover  		protocol entry point. -Protocol 2.12:	(Kernel 3.9) Added the xloadflags field and extension fields +Protocol 2.12:	(Kernel 3.8) Added the xloadflags field and extension fields  	 	to struct boot_params for for loading bzImage and ramdisk  		above 4G in 64bit. diff --git a/MAINTAINERS b/MAINTAINERS index 212c255b934..35a56bcd5e7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE  M:	Haavard Skinnemoen <hskinnemoen@gmail.com>  M:	Hans-Christian Egtvedt <egtvedt@samfundet.no>  W:	http://www.atmel.com/products/AVR32/ -W:	http://avr32linux.org/ +W:	http://mirror.egtvedt.no/avr32linux.org/  W:	http://avrfreaks.net/  S:	Maintained  F:	arch/avr32/ @@ -1,7 +1,7 @@  VERSION = 3  PATCHLEVEL = 8  SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7  NAME = Unicycling Gorilla  # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 67874b82a4e..c407edb43fe 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1650,7 +1650,6 @@ config HZ  	int  	default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \  		ARCH_S5PV210 || ARCH_EXYNOS4 -	default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER  	default AT91_TIMER_HZ if ARCH_AT91  	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE  	default 100 diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index c2f14e875eb..0957645b73a 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -385,5 +385,19 @@  				mac-address = [ 00 00 00 00 00 00 ];  			};  		}; + +		ocmcram: ocmcram@40300000 { +			compatible = "ti,am3352-ocmcram"; +			reg = <0x40300000 0x10000>; +			ti,hwmods = "ocmcram"; +			ti,no_idle_on_suspend; +		}; + +		wkup_m3: wkup_m3@44d00000 { +			compatible = "ti,am3353-wkup-m3"; +			reg = <0x44d00000 0x4000	/* M3 UMEM */ +			       0x44d80000 0x2000>;	/* M3 DMEM */ +			ti,hwmods = "wkup_m3"; +		};  	};  }; diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 36ae03a3f5d..87dfa9026c5 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)  	irq_set_chained_handler(irq, gic_handle_cascade_irq);  } +static u8 gic_get_cpumask(struct gic_chip_data *gic) +{ +	void __iomem *base = gic_data_dist_base(gic); +	u32 mask, i; + +	for (i = mask = 0; i < 32; i += 4) { +		mask = readl_relaxed(base + GIC_DIST_TARGET + i); +		mask |= mask >> 16; +		mask |= mask >> 8; +		if (mask) +			break; +	} + +	if (!mask) +		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); + +	return mask; +} +  static void __init gic_dist_init(struct 
gic_chip_data *gic)  {  	unsigned int i; @@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)  	/*  	 * Set all global interrupts to this CPU only.  	 */ -	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); +	cpumask = gic_get_cpumask(gic); +	cpumask |= cpumask << 8; +	cpumask |= cpumask << 16;  	for (i = 32; i < gic_irqs; i += 4)  		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)  	 * Get what the GIC says our CPU mask is.  	 */  	BUG_ON(cpu >= NR_GIC_CPU_IF); -	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); +	cpu_mask = gic_get_cpumask(gic);  	gic_cpu_map[cpu] = cpu_mask;  	/* diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981..1c4df27f933 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@   */  #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)  #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)  /*   * The maximum size of a 26-bit user space task. diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e103c290bc9..85afb031b67 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT  	select CPU_EXYNOS4210  	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD  	select PINCTRL -	select PINCTRL_EXYNOS4 +	select PINCTRL_EXYNOS  	select USE_OF  	help  	  Machine support for Samsung Exynos4 machine with device tree enabled. diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index d88788facf5..9a14ae52731 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -53,6 +53,7 @@ AFLAGS_sram34xx.o			:=-Wa,-march=armv7-a  # Restart code (OMAP4/5 currently in omap4-common.c)  obj-$(CONFIG_SOC_OMAP2420)		+= omap2-restart.o  obj-$(CONFIG_SOC_OMAP2430)		+= omap2-restart.o +obj-$(CONFIG_SOC_AM33XX)		+= am33xx-restart.o  obj-$(CONFIG_ARCH_OMAP3)		+= omap3-restart.o  # Pin multiplexing diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c new file mode 100644 index 00000000000..88e4fa8af03 --- /dev/null +++ b/arch/arm/mach-omap2/am33xx-restart.c @@ -0,0 +1,34 @@ +/* + * am33xx-restart.c - Code common to all AM33xx machines. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/kernel.h> + +#include "common.h" +#include "prm-regbits-33xx.h" +#include "prm33xx.h" + +/** + * am3xx_restart - trigger a software restart of the SoC + * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c + * @cmd: passed from the userspace program rebooting the system (if provided) + * + * Resets the SoC.  For @cmd, see the 'reboot' syscall in + * kernel/sys.c.  No return value. 
+ */ +void am33xx_restart(char mode, const char *cmd) +{ +	/* TODO: Handle mode and cmd if necessary */ + +	am33xx_prm_rmw_reg_bits(AM33XX_GLOBAL_WARM_SW_RST_MASK, +				AM33XX_GLOBAL_WARM_SW_RST_MASK, +				AM33XX_PRM_DEVICE_MOD, +				AM33XX_PRM_RSTCTRL_OFFSET); + +	/* OCP barrier */ +	(void)am33xx_prm_read_reg(AM33XX_PRM_DEVICE_MOD, +				  AM33XX_PRM_RSTCTRL_OFFSET); +} diff --git a/arch/arm/mach-omap2/am35xx-emac.c b/arch/arm/mach-omap2/am35xx-emac.c index af11dcdb7e2..25b79a29736 100644 --- a/arch/arm/mach-omap2/am35xx-emac.c +++ b/arch/arm/mach-omap2/am35xx-emac.c @@ -62,8 +62,7 @@ static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,  {  	struct platform_device *pdev; -	pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len, -				 NULL, 0, false); +	pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len);  	if (IS_ERR(pdev)) {  		WARN(1, "Can't build omap_device for %s:%s.\n",  		     oh->class->name, oh->name); diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 53cb380b787..fac00f0960b 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -141,6 +141,7 @@ DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")  	.init_machine	= omap_generic_init,  	.timer		= &omap3_am33xx_timer,  	.dt_compat	= am33xx_boards_compat, +	.restart	= am33xx_restart,  MACHINE_END  #endif diff --git a/arch/arm/mach-omap2/cclock2420_data.c b/arch/arm/mach-omap2/cclock2420_data.c index ab7e952d207..0f0a97c1fcc 100644 --- a/arch/arm/mach-omap2/cclock2420_data.c +++ b/arch/arm/mach-omap2/cclock2420_data.c @@ -622,15 +622,10 @@ static struct clk_hw_omap gpios_fck_hw = {  DEFINE_STRUCT_CLK(gpios_fck, gpios_fck_parent_names, aes_ick_ops); -static struct clk wu_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(wu_l4_ick, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wu_l4_ick, dpll_ck_parent_names, core_ck_ops); -  static struct clk gpios_ick;  static const char *gpios_ick_parent_names[] = { -	"wu_l4_ick", +	"sys_ck",  };  static struct clk_hw_omap gpios_ick_hw = { @@ -1682,13 +1677,6 @@ static struct clk_hw_omap wdt1_ick_hw = {  DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops); -static struct clk wdt1_osc_ck; - -static const struct clk_ops wdt1_osc_ck_ops = {}; - -DEFINE_STRUCT_CLK_HW_OMAP(wdt1_osc_ck, NULL); -DEFINE_STRUCT_CLK(wdt1_osc_ck, sys_ck_parent_names, wdt1_osc_ck_ops); -  static struct clk wdt3_fck;  static struct clk_hw_omap wdt3_fck_hw = { @@ -1767,7 +1755,6 @@ static struct omap_clk omap2420_clks[] = {  	CLK(NULL,	"func_96m_ck",	&func_96m_ck,	CK_242X),  	CLK(NULL,	"func_48m_ck",	&func_48m_ck,	CK_242X),  	CLK(NULL,	"func_12m_ck",	&func_12m_ck,	CK_242X), -	CLK(NULL,	"ck_wdt1_osc",	&wdt1_osc_ck,	CK_242X),  	CLK(NULL,	"sys_clkout_src", &sys_clkout_src, CK_242X),  	CLK(NULL,	"sys_clkout",	&sys_clkout,	CK_242X),  	CLK(NULL,	"sys_clkout2_src", &sys_clkout2_src, CK_242X), @@ -1797,7 +1784,6 @@ static struct omap_clk omap2420_clks[] = {  	/* L4 domain clocks */  	CLK(NULL,	"l4_ck",	&l4_ck,		CK_242X),  	CLK(NULL,	"ssi_l4_ick",	&ssi_l4_ick,	CK_242X), -	CLK(NULL,	"wu_l4_ick",	&wu_l4_ick,	CK_242X),  	/* virtual meta-group clock */  	CLK(NULL,	"virt_prcm_set", &virt_prcm_set, CK_242X),  	/* general l4 interface ck, multi-parent functional clk */ diff --git a/arch/arm/mach-omap2/cclock2430_data.c b/arch/arm/mach-omap2/cclock2430_data.c index eb3dab68d53..aed8f74ca07 100644 --- a/arch/arm/mach-omap2/cclock2430_data.c +++ b/arch/arm/mach-omap2/cclock2430_data.c @@ -601,15 +601,10 @@ static struct 
clk_hw_omap gpios_fck_hw = {  DEFINE_STRUCT_CLK(gpios_fck, gpio5_fck_parent_names, aes_ick_ops); -static struct clk wu_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(wu_l4_ick, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wu_l4_ick, dpll_ck_parent_names, core_ck_ops); -  static struct clk gpios_ick;  static const char *gpios_ick_parent_names[] = { -	"wu_l4_ick", +	"sys_ck",  };  static struct clk_hw_omap gpios_ick_hw = { @@ -1811,13 +1806,6 @@ static struct clk_hw_omap wdt1_ick_hw = {  DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops); -static struct clk wdt1_osc_ck; - -static const struct clk_ops wdt1_osc_ck_ops = {}; - -DEFINE_STRUCT_CLK_HW_OMAP(wdt1_osc_ck, NULL); -DEFINE_STRUCT_CLK(wdt1_osc_ck, sys_ck_parent_names, wdt1_osc_ck_ops); -  static struct clk wdt4_fck;  static struct clk_hw_omap wdt4_fck_hw = { @@ -1869,7 +1857,6 @@ static struct omap_clk omap2430_clks[] = {  	CLK(NULL,	"func_96m_ck",	&func_96m_ck,	CK_243X),  	CLK(NULL,	"func_48m_ck",	&func_48m_ck,	CK_243X),  	CLK(NULL,	"func_12m_ck",	&func_12m_ck,	CK_243X), -	CLK(NULL,	"ck_wdt1_osc",	&wdt1_osc_ck,	CK_243X),  	CLK(NULL,	"sys_clkout_src", &sys_clkout_src, CK_243X),  	CLK(NULL,	"sys_clkout",	&sys_clkout,	CK_243X),  	CLK(NULL,	"emul_ck",	&emul_ck,	CK_243X), @@ -1898,7 +1885,6 @@ static struct omap_clk omap2430_clks[] = {  	/* L4 domain clocks */  	CLK(NULL,	"l4_ck",	&l4_ck,		CK_243X),  	CLK(NULL,	"ssi_l4_ick",	&ssi_l4_ick,	CK_243X), -	CLK(NULL,	"wu_l4_ick",	&wu_l4_ick,	CK_243X),  	/* virtual meta-group clock */  	CLK(NULL,	"virt_prcm_set", &virt_prcm_set, CK_243X),  	/* general l4 interface ck, multi-parent functional clk */ diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c index ea64ad60675..476b82066cb 100644 --- a/arch/arm/mach-omap2/cclock33xx_data.c +++ b/arch/arm/mach-omap2/cclock33xx_data.c @@ -284,9 +284,10 @@ DEFINE_STRUCT_CLK(dpll_disp_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);   * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2   * and ALT_CLK1/2)   */ -DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck, 0x0, -		   AM33XX_CM_DIV_M2_DPLL_DISP, AM33XX_DPLL_CLKOUT_DIV_SHIFT, -		   AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); +DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck, +		   CLK_SET_RATE_PARENT, AM33XX_CM_DIV_M2_DPLL_DISP, +		   AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH, +		   CLK_DIVIDER_ONE_BASED, NULL);  /* DPLL_PER */  static struct dpll_data dpll_per_dd = { @@ -723,7 +724,8 @@ static struct clk_hw_omap lcd_gclk_hw = {  	.clksel_mask	= AM33XX_CLKSEL_0_1_MASK,  }; -DEFINE_STRUCT_CLK(lcd_gclk, lcd_ck_parents, gpio_fck_ops); +DEFINE_STRUCT_CLK_FLAGS(lcd_gclk, lcd_ck_parents, +			gpio_fck_ops, CLK_SET_RATE_PARENT);  DEFINE_CLK_FIXED_FACTOR(mmc_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1, 2); diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 6ef87580c33..4579c3c5338 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -426,6 +426,7 @@ static struct clk dpll4_m5x2_ck_3630 = {  	.parent_names	= dpll4_m5x2_ck_parent_names,  	.num_parents	= ARRAY_SIZE(dpll4_m5x2_ck_parent_names),  	.ops		= &dpll4_m5x2_ck_3630_ops, +	.flags		= CLK_SET_RATE_PARENT,  };  static struct clk cam_mclk; @@ -443,7 +444,14 @@ static struct clk_hw_omap cam_mclk_hw = {  	.clkdm_name	= "cam_clkdm",  }; -DEFINE_STRUCT_CLK(cam_mclk, cam_mclk_parent_names, aes2_ick_ops); +static struct clk cam_mclk = { +	.name		= "cam_mclk", +	.hw		= 
&cam_mclk_hw.hw, +	.parent_names	= cam_mclk_parent_names, +	.num_parents	= ARRAY_SIZE(cam_mclk_parent_names), +	.ops		= &aes2_ick_ops, +	.flags		= CLK_SET_RATE_PARENT, +};  static const struct clksel_rate clkout2_src_core_rates[] = {  	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX }, diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index a2cc046b47f..3d58f335f17 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -16,6 +16,10 @@   * XXX Some of the ES1 clocks have been removed/changed; once support   * is added for discriminating clocks by ES level, these should be added back   * in. + * + * XXX All of the remaining MODULEMODE clock nodes should be removed + * once the drivers are updated to use pm_runtime or to use the appropriate + * upstream clock node for rate/parent selection.   */  #include <linux/kernel.h> @@ -315,7 +319,7 @@ DEFINE_CLK_DIVIDER(dpll_abe_m2_ck, "dpll_abe_ck", &dpll_abe_ck, 0x0,  		   OMAP4430_CM_DIV_M2_DPLL_ABE, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,  		   OMAP4430_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); -static const struct clk_ops dmic_fck_ops = { +static const struct clk_ops dpll_hsd_ops = {  	.enable		= &omap2_dflt_clk_enable,  	.disable	= &omap2_dflt_clk_disable,  	.is_enabled	= &omap2_dflt_clk_is_enabled, @@ -325,6 +329,12 @@ static const struct clk_ops dmic_fck_ops = {  	.init		= &omap2_init_clk_clkdm,  }; +static const struct clk_ops func_dmic_abe_gfclk_ops = { +	.recalc_rate	= &omap2_clksel_recalc, +	.get_parent	= &omap2_clksel_find_parent_index, +	.set_parent	= &omap2_clksel_set_parent, +}; +  static const char *dpll_core_m3x2_ck_parents[] = {  	"dpll_core_x2_ck",  }; @@ -340,7 +350,7 @@ DEFINE_CLK_OMAP_MUX_GATE(dpll_core_m3x2_ck, NULL, dpll_core_m3x2_div,  			 OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,  			 OMAP4430_CM_DIV_M3_DPLL_CORE,  			 OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL, -			 dpll_core_m3x2_ck_parents, dmic_fck_ops); +			 dpll_core_m3x2_ck_parents, dpll_hsd_ops);  DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m7x2_ck, "dpll_core_x2_ck",  			  &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M7_DPLL_CORE, @@ -547,7 +557,7 @@ DEFINE_CLK_OMAP_MUX_GATE(dpll_per_m3x2_ck, NULL, dpll_per_m3x2_div,  			 OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,  			 OMAP4430_CM_DIV_M3_DPLL_PER,  			 OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL, -			 dpll_per_m3x2_ck_parents, dmic_fck_ops); +			 dpll_per_m3x2_ck_parents, dpll_hsd_ops);  DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m4x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,  			  0x0, OMAP4430_CM_DIV_M4_DPLL_PER, @@ -595,15 +605,26 @@ static const char *dpll_usb_ck_parents[] = {  static struct clk dpll_usb_ck; +static const struct clk_ops dpll_usb_ck_ops = { +	.enable		= &omap3_noncore_dpll_enable, +	.disable	= &omap3_noncore_dpll_disable, +	.recalc_rate	= &omap3_dpll_recalc, +	.round_rate	= &omap2_dpll_round_rate, +	.set_rate	= &omap3_noncore_dpll_set_rate, +	.get_parent	= &omap2_init_dpll_parent, +	.init		= &omap2_init_clk_clkdm, +}; +  static struct clk_hw_omap dpll_usb_ck_hw = {  	.hw = {  		.clk = &dpll_usb_ck,  	},  	.dpll_data	= &dpll_usb_dd, +	.clkdm_name	= "l3_init_clkdm",  	.ops		= &clkhwops_omap3_dpll,  }; -DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops); +DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_usb_ck_ops);  static const char *dpll_usb_clkdcoldo_ck_parents[] = {  	"dpll_usb_ck", @@ -749,10 +770,6 @@ DEFINE_CLK_GATE(aes2_fck, "l3_div_ck", &l3_div_ck, 0x0,  		OMAP4430_CM_L4SEC_AES2_CLKCTRL,  		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 
0x0, NULL); -DEFINE_CLK_GATE(aess_fck, "aess_fclk", &aess_fclk, 0x0, -		OMAP4430_CM1_ABE_AESS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); -  DEFINE_CLK_GATE(bandgap_fclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,  		OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT, 0x0, NULL); @@ -774,11 +791,6 @@ DEFINE_CLK_GATE(bandgap_ts_fclk, "div_ts_ck", &div_ts_ck, 0x0,  		OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(des3des_fck, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4SEC_DES3DES_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); -  static const char *dmic_sync_mux_ck_parents[] = {  	"abe_24m_fclk", "syc_clk_div_ck", "func_24m_clk",  }; @@ -795,23 +807,13 @@ static const struct clksel func_dmic_abe_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *dmic_fck_parents[] = { +static const char *func_dmic_abe_gfclk_parents[] = {  	"dmic_sync_mux_ck", "pad_clks_ck", "slimbus_clk",  }; -/* Merged func_dmic_abe_gfclk into dmic */ -static struct clk dmic_fck; - -DEFINE_CLK_OMAP_MUX_GATE(dmic_fck, "abe_clkdm", func_dmic_abe_gfclk_sel, -			 OMAP4430_CM1_ABE_DMIC_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_MASK, -			 OMAP4430_CM1_ABE_DMIC_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 dmic_fck_parents, dmic_fck_ops); - -DEFINE_CLK_GATE(dsp_fck, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, 0x0, -		OMAP4430_CM_TESLA_TESLA_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); +DEFINE_CLK_OMAP_MUX(func_dmic_abe_gfclk, "abe_clkdm", func_dmic_abe_gfclk_sel, +		    OMAP4430_CM1_ABE_DMIC_CLKCTRL, OMAP4430_CLKSEL_SOURCE_MASK, +		    func_dmic_abe_gfclk_parents, func_dmic_abe_gfclk_ops);  DEFINE_CLK_GATE(dss_sys_clk, "syc_clk_div_ck", &syc_clk_div_ck, 0x0,  		OMAP4430_CM_DSS_DSS_CLKCTRL, @@ -833,177 +835,57 @@ DEFINE_CLK_GATE(dss_fck, "l3_div_ck", &l3_div_ck, 0x0,  		OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(efuse_ctrl_cust_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0, -		OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(emif1_fck, "ddrphy_ck", &ddrphy_ck, 0x0, -		OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(emif2_fck, "ddrphy_ck", &ddrphy_ck, 0x0, -		OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_DIVIDER(fdif_fck, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, 0x0,  		   OMAP4430_CM_CAM_FDIF_CLKCTRL, OMAP4430_CLKSEL_FCLK_SHIFT,  		   OMAP4430_CLKSEL_FCLK_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL); -DEFINE_CLK_GATE(fpka_fck, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio1_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_WKUP_GPIO1_CLKCTRL,  		OMAP4430_OPTFCLKEN_DBCLK_SHIFT,	0x0, NULL); -DEFINE_CLK_GATE(gpio1_ick, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, 0x0, -		OMAP4430_CM_WKUP_GPIO1_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio2_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_L4PER_GPIO2_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(gpio2_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4PER_GPIO2_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio3_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_L4PER_GPIO3_CLKCTRL,  		OMAP4430_OPTFCLKEN_DBCLK_SHIFT, 0x0, NULL); -DEFINE_CLK_GATE(gpio3_ick, "l4_div_ck", &l4_div_ck, 0x0, -		
OMAP4430_CM_L4PER_GPIO3_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio4_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_L4PER_GPIO4_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(gpio4_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4PER_GPIO4_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio5_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_L4PER_GPIO5_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(gpio5_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4PER_GPIO5_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(gpio6_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,  		OMAP4430_CM_L4PER_GPIO6_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(gpio6_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4PER_GPIO6_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(gpmc_ick, "l3_div_ck", &l3_div_ck, 0x0, -		OMAP4430_CM_L3_2_GPMC_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT, -		0x0, NULL); -  static const struct clksel sgx_clk_mux_sel[] = {  	{ .parent = &dpll_core_m7x2_ck, .rates = div_1_0_rates },  	{ .parent = &dpll_per_m7x2_ck, .rates = div_1_1_rates },  	{ .parent = NULL },  }; -static const char *gpu_fck_parents[] = { +static const char *sgx_clk_mux_parents[] = {  	"dpll_core_m7x2_ck", "dpll_per_m7x2_ck",  }; -/* Merged sgx_clk_mux into gpu */ -DEFINE_CLK_OMAP_MUX_GATE(gpu_fck, "l3_gfx_clkdm", sgx_clk_mux_sel, -			 OMAP4430_CM_GFX_GFX_CLKCTRL, -			 OMAP4430_CLKSEL_SGX_FCLK_MASK, -			 OMAP4430_CM_GFX_GFX_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 gpu_fck_parents, dmic_fck_ops); - -DEFINE_CLK_GATE(hdq1w_fck, "func_12m_fclk", &func_12m_fclk, 0x0, -		OMAP4430_CM_L4PER_HDQ1W_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); +DEFINE_CLK_OMAP_MUX(sgx_clk_mux, "l3_gfx_clkdm", sgx_clk_mux_sel, +		    OMAP4430_CM_GFX_GFX_CLKCTRL, OMAP4430_CLKSEL_SGX_FCLK_MASK, +		    sgx_clk_mux_parents, func_dmic_abe_gfclk_ops);  DEFINE_CLK_DIVIDER(hsi_fck, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, 0x0,  		   OMAP4430_CM_L3INIT_HSI_CLKCTRL, OMAP4430_CLKSEL_24_25_SHIFT,  		   OMAP4430_CLKSEL_24_25_WIDTH, CLK_DIVIDER_POWER_OF_TWO,  		   NULL); -DEFINE_CLK_GATE(i2c1_fck, "func_96m_fclk", &func_96m_fclk, 0x0, -		OMAP4430_CM_L4PER_I2C1_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(i2c2_fck, "func_96m_fclk", &func_96m_fclk, 0x0, -		OMAP4430_CM_L4PER_I2C2_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(i2c3_fck, "func_96m_fclk", &func_96m_fclk, 0x0, -		OMAP4430_CM_L4PER_I2C3_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(i2c4_fck, "func_96m_fclk", &func_96m_fclk, 0x0, -		OMAP4430_CM_L4PER_I2C4_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(ipu_fck, "ducati_clk_mux_ck", &ducati_clk_mux_ck, 0x0, -		OMAP4430_CM_DUCATI_DUCATI_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(iss_ctrlclk, "func_96m_fclk", &func_96m_fclk, 0x0,  		OMAP4430_CM_CAM_ISS_CLKCTRL, OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(iss_fck, "ducati_clk_mux_ck", &ducati_clk_mux_ck, 0x0, -		OMAP4430_CM_CAM_ISS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); - -DEFINE_CLK_GATE(iva_fck, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, 0x0, -		OMAP4430_CM_IVAHD_IVAHD_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(kbd_fck, "sys_32k_ck", &sys_32k_ck, 0x0, 
-		OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -static struct clk l3_instr_ick; - -static const char *l3_instr_ick_parent_names[] = { -	"l3_div_ck", -}; - -static const struct clk_ops l3_instr_ick_ops = { -	.enable		= &omap2_dflt_clk_enable, -	.disable	= &omap2_dflt_clk_disable, -	.is_enabled	= &omap2_dflt_clk_is_enabled, -	.init		= &omap2_init_clk_clkdm, -}; - -static struct clk_hw_omap l3_instr_ick_hw = { -	.hw = { -		.clk = &l3_instr_ick, -	}, -	.enable_reg	= OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL, -	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL_SHIFT, -	.clkdm_name	= "l3_instr_clkdm", -}; - -DEFINE_STRUCT_CLK(l3_instr_ick, l3_instr_ick_parent_names, l3_instr_ick_ops); - -static struct clk l3_main_3_ick; -static struct clk_hw_omap l3_main_3_ick_hw = { -	.hw = { -		.clk = &l3_main_3_ick, -	}, -	.enable_reg	= OMAP4430_CM_L3INSTR_L3_3_CLKCTRL, -	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL_SHIFT, -	.clkdm_name	= "l3_instr_clkdm", -}; - -DEFINE_STRUCT_CLK(l3_main_3_ick, l3_instr_ick_parent_names, l3_instr_ick_ops); -  DEFINE_CLK_MUX(mcasp_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,  	       OMAP4430_CM1_ABE_MCASP_CLKCTRL,  	       OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT, @@ -1016,17 +898,13 @@ static const struct clksel func_mcasp_abe_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mcasp_fck_parents[] = { +static const char *func_mcasp_abe_gfclk_parents[] = {  	"mcasp_sync_mux_ck", "pad_clks_ck", "slimbus_clk",  }; -/* Merged func_mcasp_abe_gfclk into mcasp */ -DEFINE_CLK_OMAP_MUX_GATE(mcasp_fck, "abe_clkdm", func_mcasp_abe_gfclk_sel, -			 OMAP4430_CM1_ABE_MCASP_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_MASK, -			 OMAP4430_CM1_ABE_MCASP_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mcasp_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(func_mcasp_abe_gfclk, "abe_clkdm", func_mcasp_abe_gfclk_sel, +		    OMAP4430_CM1_ABE_MCASP_CLKCTRL, OMAP4430_CLKSEL_SOURCE_MASK, +		    func_mcasp_abe_gfclk_parents, func_dmic_abe_gfclk_ops);  DEFINE_CLK_MUX(mcbsp1_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,  	       OMAP4430_CM1_ABE_MCBSP1_CLKCTRL, @@ -1040,17 +918,14 @@ static const struct clksel func_mcbsp1_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mcbsp1_fck_parents[] = { +static const char *func_mcbsp1_gfclk_parents[] = {  	"mcbsp1_sync_mux_ck", "pad_clks_ck", "slimbus_clk",  }; -/* Merged func_mcbsp1_gfclk into mcbsp1 */ -DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "abe_clkdm", func_mcbsp1_gfclk_sel, -			 OMAP4430_CM1_ABE_MCBSP1_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_MASK, -			 OMAP4430_CM1_ABE_MCBSP1_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mcbsp1_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(func_mcbsp1_gfclk, "abe_clkdm", func_mcbsp1_gfclk_sel, +		    OMAP4430_CM1_ABE_MCBSP1_CLKCTRL, +		    OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp1_gfclk_parents, +		    func_dmic_abe_gfclk_ops);  DEFINE_CLK_MUX(mcbsp2_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,  	       OMAP4430_CM1_ABE_MCBSP2_CLKCTRL, @@ -1064,17 +939,14 @@ static const struct clksel func_mcbsp2_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mcbsp2_fck_parents[] = { +static const char *func_mcbsp2_gfclk_parents[] = {  	"mcbsp2_sync_mux_ck", "pad_clks_ck", "slimbus_clk",  }; -/* Merged func_mcbsp2_gfclk into mcbsp2 */ -DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "abe_clkdm", func_mcbsp2_gfclk_sel, -			 OMAP4430_CM1_ABE_MCBSP2_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_MASK, -			 OMAP4430_CM1_ABE_MCBSP2_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 
NULL, -			 mcbsp2_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(func_mcbsp2_gfclk, "abe_clkdm", func_mcbsp2_gfclk_sel, +		    OMAP4430_CM1_ABE_MCBSP2_CLKCTRL, +		    OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp2_gfclk_parents, +		    func_dmic_abe_gfclk_ops);  DEFINE_CLK_MUX(mcbsp3_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,  	       OMAP4430_CM1_ABE_MCBSP3_CLKCTRL, @@ -1088,17 +960,14 @@ static const struct clksel func_mcbsp3_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mcbsp3_fck_parents[] = { +static const char *func_mcbsp3_gfclk_parents[] = {  	"mcbsp3_sync_mux_ck", "pad_clks_ck", "slimbus_clk",  }; -/* Merged func_mcbsp3_gfclk into mcbsp3 */ -DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "abe_clkdm", func_mcbsp3_gfclk_sel, -			 OMAP4430_CM1_ABE_MCBSP3_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_MASK, -			 OMAP4430_CM1_ABE_MCBSP3_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mcbsp3_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(func_mcbsp3_gfclk, "abe_clkdm", func_mcbsp3_gfclk_sel, +		    OMAP4430_CM1_ABE_MCBSP3_CLKCTRL, +		    OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp3_gfclk_parents, +		    func_dmic_abe_gfclk_ops);  static const char *mcbsp4_sync_mux_ck_parents[] = {  	"func_96m_fclk", "per_abe_nc_fclk", @@ -1115,37 +984,14 @@ static const struct clksel per_mcbsp4_gfclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mcbsp4_fck_parents[] = { +static const char *per_mcbsp4_gfclk_parents[] = {  	"mcbsp4_sync_mux_ck", "pad_clks_ck",  }; -/* Merged per_mcbsp4_gfclk into mcbsp4 */ -DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "l4_per_clkdm", per_mcbsp4_gfclk_sel, -			 OMAP4430_CM_L4PER_MCBSP4_CLKCTRL, -			 OMAP4430_CLKSEL_SOURCE_24_24_MASK, -			 OMAP4430_CM_L4PER_MCBSP4_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mcbsp4_fck_parents, dmic_fck_ops); - -DEFINE_CLK_GATE(mcpdm_fck, "pad_clks_ck", &pad_clks_ck, 0x0, -		OMAP4430_CM1_ABE_PDM_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); - -DEFINE_CLK_GATE(mcspi1_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MCSPI1_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(mcspi2_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MCSPI2_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(mcspi3_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MCSPI3_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(mcspi4_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MCSPI4_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); +DEFINE_CLK_OMAP_MUX(per_mcbsp4_gfclk, "l4_per_clkdm", per_mcbsp4_gfclk_sel, +		    OMAP4430_CM_L4PER_MCBSP4_CLKCTRL, +		    OMAP4430_CLKSEL_SOURCE_24_24_MASK, per_mcbsp4_gfclk_parents, +		    func_dmic_abe_gfclk_ops);  static const struct clksel hsmmc1_fclk_sel[] = {  	{ .parent = &func_64m_fclk, .rates = div_1_0_rates }, @@ -1153,69 +999,22 @@ static const struct clksel hsmmc1_fclk_sel[] = {  	{ .parent = NULL },  }; -static const char *mmc1_fck_parents[] = { +static const char *hsmmc1_fclk_parents[] = {  	"func_64m_fclk", "func_96m_fclk",  }; -/* Merged hsmmc1_fclk into mmc1 */ -DEFINE_CLK_OMAP_MUX_GATE(mmc1_fck, "l3_init_clkdm", hsmmc1_fclk_sel, -			 OMAP4430_CM_L3INIT_MMC1_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L3INIT_MMC1_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mmc1_fck_parents, dmic_fck_ops); - -/* Merged hsmmc2_fclk into mmc2 */ -DEFINE_CLK_OMAP_MUX_GATE(mmc2_fck, "l3_init_clkdm", hsmmc1_fclk_sel, -			 
OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 mmc1_fck_parents, dmic_fck_ops); - -DEFINE_CLK_GATE(mmc3_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MMCSD3_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(mmc4_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MMCSD4_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(mmc5_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_MMCSD5_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, -		OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(ocp2scp_usb_phy_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, -		OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL); - -static struct clk ocp_wp_noc_ick; - -static struct clk_hw_omap ocp_wp_noc_ick_hw = { -	.hw = { -		.clk = &ocp_wp_noc_ick, -	}, -	.enable_reg	= OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL, -	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL_SHIFT, -	.clkdm_name	= "l3_instr_clkdm", -}; - -DEFINE_STRUCT_CLK(ocp_wp_noc_ick, l3_instr_ick_parent_names, l3_instr_ick_ops); +DEFINE_CLK_OMAP_MUX(hsmmc1_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, +		    OMAP4430_CM_L3INIT_MMC1_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); -DEFINE_CLK_GATE(rng_ick, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4SEC_RNG_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT, -		0x0, NULL); +DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, +		    OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);  DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,  		OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,  		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); -DEFINE_CLK_GATE(sl2if_ick, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, 0x0, -		OMAP4430_CM_IVAHD_SL2_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT, -		0x0, NULL); -  DEFINE_CLK_GATE(slimbus1_fclk_1, "func_24m_clk", &func_24m_clk, 0x0,  		OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,  		OMAP4430_OPTFCLKEN_FCLK1_SHIFT, 0x0, NULL); @@ -1232,10 +1031,6 @@ DEFINE_CLK_GATE(slimbus1_slimbus_clk, "slimbus_clk", &slimbus_clk, 0x0,  		OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,  		OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT, 0x0, NULL); -DEFINE_CLK_GATE(slimbus1_fck, "ocp_abe_iclk", &ocp_abe_iclk, 0x0, -		OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(slimbus2_fclk_1, "per_abe_24m_fclk", &per_abe_24m_fclk, 0x0,  		OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,  		OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT, 0x0, NULL); @@ -1249,10 +1044,6 @@ DEFINE_CLK_GATE(slimbus2_slimbus_clk, "pad_slimbus_core_clks_ck",  		OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,  		OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT, 0x0, NULL); -DEFINE_CLK_GATE(slimbus2_fck, "l4_div_ck", &l4_div_ck, 0x0, -		OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); -  DEFINE_CLK_GATE(smartreflex_core_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,  		0x0, OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,  		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1271,52 +1062,35 @@ static const struct clksel dmt1_clk_mux_sel[] = {  	{ .parent = NULL },  }; -/* Merged dmt1_clk_mux into timer1 */ -DEFINE_CLK_OMAP_MUX_GATE(timer1_fck, "l4_wkup_clkdm", dmt1_clk_mux_sel, -			 
OMAP4430_CM_WKUP_TIMER1_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_WKUP_TIMER1_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(dmt1_clk_mux, "l4_wkup_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_WKUP_TIMER1_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops); -/* Merged cm2_dm10_mux into timer10 */ -DEFINE_CLK_OMAP_MUX_GATE(timer10_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(cm2_dm10_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops); -/* Merged cm2_dm11_mux into timer11 */ -DEFINE_CLK_OMAP_MUX_GATE(timer11_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(cm2_dm11_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops); -/* Merged cm2_dm2_mux into timer2 */ -DEFINE_CLK_OMAP_MUX_GATE(timer2_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(cm2_dm2_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops); -/* Merged cm2_dm3_mux into timer3 */ -DEFINE_CLK_OMAP_MUX_GATE(timer3_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(cm2_dm3_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops); -/* Merged cm2_dm4_mux into timer4 */ -DEFINE_CLK_OMAP_MUX_GATE(timer4_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(cm2_dm4_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops);  static const struct clksel timer5_sync_mux_sel[] = {  	{ .parent = &syc_clk_div_ck, .rates = div_1_0_rates }, @@ -1324,61 +1098,30 @@ static const struct clksel timer5_sync_mux_sel[] = {  	{ .parent = NULL },  }; -static const char *timer5_fck_parents[] = { +static const char *timer5_sync_mux_parents[] = {  	"syc_clk_div_ck", "sys_32k_ck",  }; -/* Merged timer5_sync_mux into timer5 */ -DEFINE_CLK_OMAP_MUX_GATE(timer5_fck, "abe_clkdm", timer5_sync_mux_sel, -			 
OMAP4430_CM1_ABE_TIMER5_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM1_ABE_TIMER5_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 timer5_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(timer5_sync_mux, "abe_clkdm", timer5_sync_mux_sel, +		    OMAP4430_CM1_ABE_TIMER5_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    timer5_sync_mux_parents, func_dmic_abe_gfclk_ops); -/* Merged timer6_sync_mux into timer6 */ -DEFINE_CLK_OMAP_MUX_GATE(timer6_fck, "abe_clkdm", timer5_sync_mux_sel, -			 OMAP4430_CM1_ABE_TIMER6_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM1_ABE_TIMER6_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 timer5_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(timer6_sync_mux, "abe_clkdm", timer5_sync_mux_sel, +		    OMAP4430_CM1_ABE_TIMER6_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    timer5_sync_mux_parents, func_dmic_abe_gfclk_ops); -/* Merged timer7_sync_mux into timer7 */ -DEFINE_CLK_OMAP_MUX_GATE(timer7_fck, "abe_clkdm", timer5_sync_mux_sel, -			 OMAP4430_CM1_ABE_TIMER7_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM1_ABE_TIMER7_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 timer5_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(timer7_sync_mux, "abe_clkdm", timer5_sync_mux_sel, +		    OMAP4430_CM1_ABE_TIMER7_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    timer5_sync_mux_parents, func_dmic_abe_gfclk_ops); -/* Merged timer8_sync_mux into timer8 */ -DEFINE_CLK_OMAP_MUX_GATE(timer8_fck, "abe_clkdm", timer5_sync_mux_sel, -			 OMAP4430_CM1_ABE_TIMER8_CLKCTRL, OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM1_ABE_TIMER8_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 timer5_fck_parents, dmic_fck_ops); +DEFINE_CLK_OMAP_MUX(timer8_sync_mux, "abe_clkdm", timer5_sync_mux_sel, +		    OMAP4430_CM1_ABE_TIMER8_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    timer5_sync_mux_parents, func_dmic_abe_gfclk_ops); -/* Merged cm2_dm9_mux into timer9 */ -DEFINE_CLK_OMAP_MUX_GATE(timer9_fck, "l4_per_clkdm", dmt1_clk_mux_sel, -			 OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL, -			 OMAP4430_CLKSEL_MASK, -			 OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL, -			 OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL, -			 abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops); - -DEFINE_CLK_GATE(uart1_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_UART1_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(uart2_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_UART2_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(uart3_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_UART3_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); - -DEFINE_CLK_GATE(uart4_fck, "func_48m_fclk", &func_48m_fclk, 0x0, -		OMAP4430_CM_L4PER_UART4_CLKCTRL, -		OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); +DEFINE_CLK_OMAP_MUX(cm2_dm9_mux, "l4_per_clkdm", dmt1_clk_mux_sel, +		    OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL, OMAP4430_CLKSEL_MASK, +		    abe_dpll_bypass_clk_mux_ck_parents, +		    func_dmic_abe_gfclk_ops);  static struct clk usb_host_fs_fck; @@ -1512,18 +1255,6 @@ DEFINE_CLK_GATE(usim_fclk, "usim_ck", &usim_ck, 0x0,  		OMAP4430_CM_WKUP_USIM_CLKCTRL, OMAP4430_OPTFCLKEN_FCLK_SHIFT,  		0x0, NULL); -DEFINE_CLK_GATE(usim_fck, "sys_32k_ck", &sys_32k_ck, 0x0, -		OMAP4430_CM_WKUP_USIM_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT, -		0x0, NULL); - -DEFINE_CLK_GATE(wd_timer2_fck, "sys_32k_ck", &sys_32k_ck, 0x0, -		OMAP4430_CM_WKUP_WDT2_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); - -DEFINE_CLK_GATE(wd_timer3_fck, "sys_32k_ck", &sys_32k_ck, 0x0, -		
OMAP4430_CM1_ABE_WDT3_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, -		0x0, NULL); -  /* Remaining optional clocks */  static const char *pmd_stm_clock_mux_ck_parents[] = {  	"sys_clkin_ck", "dpll_core_m6x2_ck", "tie_low_clock_ck", @@ -1774,106 +1505,61 @@ static struct omap_clk omap44xx_clks[] = {  	CLK(NULL,	"syc_clk_div_ck",		&syc_clk_div_ck,	CK_443X),  	CLK(NULL,	"aes1_fck",			&aes1_fck,	CK_443X),  	CLK(NULL,	"aes2_fck",			&aes2_fck,	CK_443X), -	CLK(NULL,	"aess_fck",			&aess_fck,	CK_443X),  	CLK(NULL,	"bandgap_fclk",			&bandgap_fclk,	CK_443X),  	CLK(NULL,	"div_ts_ck",			&div_ts_ck,	CK_446X),  	CLK(NULL,	"bandgap_ts_fclk",		&bandgap_ts_fclk,	CK_446X), -	CLK(NULL,	"des3des_fck",			&des3des_fck,	CK_443X),  	CLK(NULL,	"dmic_sync_mux_ck",		&dmic_sync_mux_ck,	CK_443X), -	CLK(NULL,	"dmic_fck",			&dmic_fck,	CK_443X), -	CLK(NULL,	"dsp_fck",			&dsp_fck,	CK_443X), +	CLK(NULL,	"func_dmic_abe_gfclk",			&func_dmic_abe_gfclk,	CK_443X),  	CLK(NULL,	"dss_sys_clk",			&dss_sys_clk,	CK_443X),  	CLK(NULL,	"dss_tv_clk",			&dss_tv_clk,	CK_443X),  	CLK(NULL,	"dss_dss_clk",			&dss_dss_clk,	CK_443X),  	CLK(NULL,	"dss_48mhz_clk",		&dss_48mhz_clk,	CK_443X),  	CLK(NULL,	"dss_fck",			&dss_fck,	CK_443X),  	CLK("omapdss_dss",	"ick",			&dss_fck,	CK_443X), -	CLK(NULL,	"efuse_ctrl_cust_fck",		&efuse_ctrl_cust_fck,	CK_443X), -	CLK(NULL,	"emif1_fck",			&emif1_fck,	CK_443X), -	CLK(NULL,	"emif2_fck",			&emif2_fck,	CK_443X),  	CLK(NULL,	"fdif_fck",			&fdif_fck,	CK_443X), -	CLK(NULL,	"fpka_fck",			&fpka_fck,	CK_443X),  	CLK(NULL,	"gpio1_dbclk",			&gpio1_dbclk,	CK_443X), -	CLK(NULL,	"gpio1_ick",			&gpio1_ick,	CK_443X),  	CLK(NULL,	"gpio2_dbclk",			&gpio2_dbclk,	CK_443X), -	CLK(NULL,	"gpio2_ick",			&gpio2_ick,	CK_443X),  	CLK(NULL,	"gpio3_dbclk",			&gpio3_dbclk,	CK_443X), -	CLK(NULL,	"gpio3_ick",			&gpio3_ick,	CK_443X),  	CLK(NULL,	"gpio4_dbclk",			&gpio4_dbclk,	CK_443X), -	CLK(NULL,	"gpio4_ick",			&gpio4_ick,	CK_443X),  	CLK(NULL,	"gpio5_dbclk",			&gpio5_dbclk,	CK_443X), -	CLK(NULL,	"gpio5_ick",			&gpio5_ick,	CK_443X),  	CLK(NULL,	"gpio6_dbclk",			&gpio6_dbclk,	CK_443X), -	CLK(NULL,	"gpio6_ick",			&gpio6_ick,	CK_443X), -	CLK(NULL,	"gpmc_ick",			&gpmc_ick,	CK_443X), -	CLK(NULL,	"gpu_fck",			&gpu_fck,	CK_443X), -	CLK(NULL,	"hdq1w_fck",			&hdq1w_fck,	CK_443X), +	CLK(NULL,	"sgx_clk_mux",			&sgx_clk_mux,	CK_443X),  	CLK(NULL,	"hsi_fck",			&hsi_fck,	CK_443X), -	CLK(NULL,	"i2c1_fck",			&i2c1_fck,	CK_443X), -	CLK(NULL,	"i2c2_fck",			&i2c2_fck,	CK_443X), -	CLK(NULL,	"i2c3_fck",			&i2c3_fck,	CK_443X), -	CLK(NULL,	"i2c4_fck",			&i2c4_fck,	CK_443X), -	CLK(NULL,	"ipu_fck",			&ipu_fck,	CK_443X),  	CLK(NULL,	"iss_ctrlclk",			&iss_ctrlclk,	CK_443X), -	CLK(NULL,	"iss_fck",			&iss_fck,	CK_443X), -	CLK(NULL,	"iva_fck",			&iva_fck,	CK_443X), -	CLK(NULL,	"kbd_fck",			&kbd_fck,	CK_443X), -	CLK(NULL,	"l3_instr_ick",			&l3_instr_ick,	CK_443X), -	CLK(NULL,	"l3_main_3_ick",		&l3_main_3_ick,	CK_443X),  	CLK(NULL,	"mcasp_sync_mux_ck",		&mcasp_sync_mux_ck,	CK_443X), -	CLK(NULL,	"mcasp_fck",			&mcasp_fck,	CK_443X), +	CLK(NULL,	"func_mcasp_abe_gfclk",			&func_mcasp_abe_gfclk,	CK_443X),  	CLK(NULL,	"mcbsp1_sync_mux_ck",		&mcbsp1_sync_mux_ck,	CK_443X), -	CLK(NULL,	"mcbsp1_fck",			&mcbsp1_fck,	CK_443X), +	CLK(NULL,	"func_mcbsp1_gfclk",			&func_mcbsp1_gfclk,	CK_443X),  	CLK(NULL,	"mcbsp2_sync_mux_ck",		&mcbsp2_sync_mux_ck,	CK_443X), -	CLK(NULL,	"mcbsp2_fck",			&mcbsp2_fck,	CK_443X), +	CLK(NULL,	"func_mcbsp2_gfclk",			&func_mcbsp2_gfclk,	CK_443X),  	CLK(NULL,	"mcbsp3_sync_mux_ck",		&mcbsp3_sync_mux_ck,	CK_443X), -	CLK(NULL,	"mcbsp3_fck",			&mcbsp3_fck,	CK_443X), +	
CLK(NULL,	"func_mcbsp3_gfclk",			&func_mcbsp3_gfclk,	CK_443X),  	CLK(NULL,	"mcbsp4_sync_mux_ck",		&mcbsp4_sync_mux_ck,	CK_443X), -	CLK(NULL,	"mcbsp4_fck",			&mcbsp4_fck,	CK_443X), -	CLK(NULL,	"mcpdm_fck",			&mcpdm_fck,	CK_443X), -	CLK(NULL,	"mcspi1_fck",			&mcspi1_fck,	CK_443X), -	CLK(NULL,	"mcspi2_fck",			&mcspi2_fck,	CK_443X), -	CLK(NULL,	"mcspi3_fck",			&mcspi3_fck,	CK_443X), -	CLK(NULL,	"mcspi4_fck",			&mcspi4_fck,	CK_443X), -	CLK(NULL,	"mmc1_fck",			&mmc1_fck,	CK_443X), -	CLK(NULL,	"mmc2_fck",			&mmc2_fck,	CK_443X), -	CLK(NULL,	"mmc3_fck",			&mmc3_fck,	CK_443X), -	CLK(NULL,	"mmc4_fck",			&mmc4_fck,	CK_443X), -	CLK(NULL,	"mmc5_fck",			&mmc5_fck,	CK_443X), -	CLK(NULL,	"ocp2scp_usb_phy_phy_48m",	&ocp2scp_usb_phy_phy_48m,	CK_443X), -	CLK(NULL,	"ocp2scp_usb_phy_ick",		&ocp2scp_usb_phy_ick,	CK_443X), -	CLK(NULL,	"ocp_wp_noc_ick",		&ocp_wp_noc_ick,	CK_443X), -	CLK(NULL,	"rng_ick",			&rng_ick,	CK_443X), -	CLK("omap_rng",	"ick",				&rng_ick,	CK_443X), +	CLK(NULL,	"per_mcbsp4_gfclk",			&per_mcbsp4_gfclk,	CK_443X), +	CLK(NULL,	"hsmmc1_fclk",			&hsmmc1_fclk,	CK_443X), +	CLK(NULL,	"hsmmc2_fclk",			&hsmmc2_fclk,	CK_443X),  	CLK(NULL,	"sha2md5_fck",			&sha2md5_fck,	CK_443X), -	CLK(NULL,	"sl2if_ick",			&sl2if_ick,	CK_443X),  	CLK(NULL,	"slimbus1_fclk_1",		&slimbus1_fclk_1,	CK_443X),  	CLK(NULL,	"slimbus1_fclk_0",		&slimbus1_fclk_0,	CK_443X),  	CLK(NULL,	"slimbus1_fclk_2",		&slimbus1_fclk_2,	CK_443X),  	CLK(NULL,	"slimbus1_slimbus_clk",		&slimbus1_slimbus_clk,	CK_443X), -	CLK(NULL,	"slimbus1_fck",			&slimbus1_fck,	CK_443X),  	CLK(NULL,	"slimbus2_fclk_1",		&slimbus2_fclk_1,	CK_443X),  	CLK(NULL,	"slimbus2_fclk_0",		&slimbus2_fclk_0,	CK_443X),  	CLK(NULL,	"slimbus2_slimbus_clk",		&slimbus2_slimbus_clk,	CK_443X), -	CLK(NULL,	"slimbus2_fck",			&slimbus2_fck,	CK_443X),  	CLK(NULL,	"smartreflex_core_fck",		&smartreflex_core_fck,	CK_443X),  	CLK(NULL,	"smartreflex_iva_fck",		&smartreflex_iva_fck,	CK_443X),  	CLK(NULL,	"smartreflex_mpu_fck",		&smartreflex_mpu_fck,	CK_443X), -	CLK(NULL,	"timer1_fck",			&timer1_fck,	CK_443X), -	CLK(NULL,	"timer10_fck",			&timer10_fck,	CK_443X), -	CLK(NULL,	"timer11_fck",			&timer11_fck,	CK_443X), -	CLK(NULL,	"timer2_fck",			&timer2_fck,	CK_443X), -	CLK(NULL,	"timer3_fck",			&timer3_fck,	CK_443X), -	CLK(NULL,	"timer4_fck",			&timer4_fck,	CK_443X), -	CLK(NULL,	"timer5_fck",			&timer5_fck,	CK_443X), -	CLK(NULL,	"timer6_fck",			&timer6_fck,	CK_443X), -	CLK(NULL,	"timer7_fck",			&timer7_fck,	CK_443X), -	CLK(NULL,	"timer8_fck",			&timer8_fck,	CK_443X), -	CLK(NULL,	"timer9_fck",			&timer9_fck,	CK_443X), -	CLK(NULL,	"uart1_fck",			&uart1_fck,	CK_443X), -	CLK(NULL,	"uart2_fck",			&uart2_fck,	CK_443X), -	CLK(NULL,	"uart3_fck",			&uart3_fck,	CK_443X), -	CLK(NULL,	"uart4_fck",			&uart4_fck,	CK_443X), +	CLK(NULL,	"dmt1_clk_mux",			&dmt1_clk_mux,	CK_443X), +	CLK(NULL,	"cm2_dm10_mux",			&cm2_dm10_mux,	CK_443X), +	CLK(NULL,	"cm2_dm11_mux",			&cm2_dm11_mux,	CK_443X), +	CLK(NULL,	"cm2_dm2_mux",			&cm2_dm2_mux,	CK_443X), +	CLK(NULL,	"cm2_dm3_mux",			&cm2_dm3_mux,	CK_443X), +	CLK(NULL,	"cm2_dm4_mux",			&cm2_dm4_mux,	CK_443X), +	CLK(NULL,	"timer5_sync_mux",		&timer5_sync_mux,	CK_443X), +	CLK(NULL,	"timer6_sync_mux",			&timer6_sync_mux,	CK_443X), +	CLK(NULL,	"timer7_sync_mux",			&timer7_sync_mux,	CK_443X), +	CLK(NULL,	"timer8_sync_mux",			&timer8_sync_mux,	CK_443X), +	CLK(NULL,	"cm2_dm9_mux",			&cm2_dm9_mux,	CK_443X),  	CLK(NULL,	"usb_host_fs_fck",		&usb_host_fs_fck,	CK_443X),  	CLK("usbhs_omap",	"fs_fck",		&usb_host_fs_fck,	CK_443X),  	CLK(NULL,	"utmi_p1_gfclk",		&utmi_p1_gfclk,	CK_443X), @@ 
-1901,9 +1587,6 @@ static struct omap_clk omap44xx_clks[] = {  	CLK("usbhs_tll",	"usbtll_ick",		&usb_tll_hs_ick,	CK_443X),  	CLK(NULL,	"usim_ck",			&usim_ck,	CK_443X),  	CLK(NULL,	"usim_fclk",			&usim_fclk,	CK_443X), -	CLK(NULL,	"usim_fck",			&usim_fck,	CK_443X), -	CLK(NULL,	"wd_timer2_fck",		&wd_timer2_fck,	CK_443X), -	CLK(NULL,	"wd_timer3_fck",		&wd_timer3_fck,	CK_443X),  	CLK(NULL,	"pmd_stm_clock_mux_ck",		&pmd_stm_clock_mux_ck,	CK_443X),  	CLK(NULL,	"pmd_trace_clk_mux_ck",		&pmd_trace_clk_mux_ck,	CK_443X),  	CLK(NULL,	"stm_clk_div_ck",		&stm_clk_div_ck,	CK_443X), @@ -1980,15 +1663,6 @@ static struct omap_clk omap44xx_clks[] = {  	CLK(NULL,	"cpufreq_ck",	&dpll_mpu_ck,	CK_443X),  }; -static const char *enable_init_clks[] = { -	"emif1_fck", -	"emif2_fck", -	"gpmc_ick", -	"l3_instr_ick", -	"l3_main_3_ick", -	"ocp_wp_noc_ick", -}; -  int __init omap4xxx_clk_init(void)  {  	u32 cpu_clkflg; @@ -2019,9 +1693,6 @@ int __init omap4xxx_clk_init(void)  	omap2_clk_disable_autoidle_all(); -	omap2_clk_enable_init_clocks(enable_init_clks, -				     ARRAY_SIZE(enable_init_clks)); -  	/*  	 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power  	 * state when turning the ABE clock domain. Workaround this by diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index b40204837bd..60ddd8612b4 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -65,6 +65,17 @@ struct clockdomain;  		.ops = &_clkops_name,				\  	}; +#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,	\ +				_clkops_name, _flags)		\ +	static struct clk _name = {				\ +		.name = #_name,					\ +		.hw = &_name##_hw.hw,				\ +		.parent_names = _parent_array_name,		\ +		.num_parents = ARRAY_SIZE(_parent_array_name),	\ +		.ops = &_clkops_name,				\ +		.flags = _flags,				\ +	}; +  #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)		\  	static struct clk_hw_omap _name##_hw = {		\  		.hw = {						\ diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c index 7faf82d4e85..2da3b5ec010 100644 --- a/arch/arm/mach-omap2/clockdomain.c +++ b/arch/arm/mach-omap2/clockdomain.c @@ -92,8 +92,6 @@ static int _clkdm_register(struct clockdomain *clkdm)  	pwrdm_add_clkdm(pwrdm, clkdm); -	spin_lock_init(&clkdm->lock); -  	pr_debug("clockdomain: registered %s\n", clkdm->name);  	return 0; @@ -122,7 +120,7 @@ static struct clkdm_dep *_clkdm_deps_lookup(struct clockdomain *clkdm,  	return cd;  } -/* +/**   * _autodep_lookup - resolve autodep clkdm names to clkdm pointers; store   * @autodep: struct clkdm_autodep * to resolve   * @@ -154,88 +152,206 @@ static void _autodep_lookup(struct clkdm_autodep *autodep)  	autodep->clkdm.ptr = clkdm;  } -/* - * _clkdm_add_autodeps - add auto sleepdeps/wkdeps to clkdm upon clock enable - * @clkdm: struct clockdomain * +/** + * _resolve_clkdm_deps() - resolve clkdm_names in @clkdm_deps to clkdms + * @clkdm: clockdomain that we are resolving dependencies for + * @clkdm_deps: ptr to array of struct clkdm_deps to resolve   * - * Add the "autodep" sleep & wakeup dependencies to clockdomain 'clkdm' - * in hardware-supervised mode.  Meant to be called from clock framework - * when a clock inside clockdomain 'clkdm' is enabled.	No return value. + * Iterates through @clkdm_deps, looking up the struct clockdomain named by + * clkdm_name and storing the clockdomain pointer in the struct clkdm_dep. + * No return value. 
+ */ +static void _resolve_clkdm_deps(struct clockdomain *clkdm, +				struct clkdm_dep *clkdm_deps) +{ +	struct clkdm_dep *cd; + +	for (cd = clkdm_deps; cd && cd->clkdm_name; cd++) { +		if (cd->clkdm) +			continue; +		cd->clkdm = _clkdm_lookup(cd->clkdm_name); + +		WARN(!cd->clkdm, "clockdomain: %s: could not find clkdm %s while resolving dependencies - should never happen", +		     clkdm->name, cd->clkdm_name); +	} +} + +/** + * _clkdm_add_wkdep - add a wakeup dependency from clkdm2 to clkdm1 (lockless) + * @clkdm1: wake this struct clockdomain * up (dependent) + * @clkdm2: when this struct clockdomain * wakes up (source)   * - * XXX autodeps are deprecated and should be removed at the earliest - * opportunity + * When the clockdomain represented by @clkdm2 wakes up, wake up + * @clkdm1. Implemented in hardware on the OMAP, this feature is + * designed to reduce wakeup latency of the dependent clockdomain @clkdm1. + * Returns -EINVAL if presented with invalid clockdomain pointers, + * -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or 0 upon + * success.   */ -void _clkdm_add_autodeps(struct clockdomain *clkdm) +static int _clkdm_add_wkdep(struct clockdomain *clkdm1, +			    struct clockdomain *clkdm2)  { -	struct clkdm_autodep *autodep; +	struct clkdm_dep *cd; +	int ret = 0; -	if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) -		return; +	if (!clkdm1 || !clkdm2) +		return -EINVAL; -	for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { -		if (IS_ERR(autodep->clkdm.ptr)) -			continue; +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); +	if (IS_ERR(cd)) +		ret = PTR_ERR(cd); -		pr_debug("clockdomain: %s: adding %s sleepdep/wkdep\n", -			 clkdm->name, autodep->clkdm.ptr->name); +	if (!arch_clkdm || !arch_clkdm->clkdm_add_wkdep) +		ret = -EINVAL; + +	if (ret) { +		pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", +			 clkdm1->name, clkdm2->name); +		return ret; +	} + +	cd->wkdep_usecount++; +	if (cd->wkdep_usecount == 1) { +		pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n", +			 clkdm1->name, clkdm2->name); -		clkdm_add_sleepdep(clkdm, autodep->clkdm.ptr); -		clkdm_add_wkdep(clkdm, autodep->clkdm.ptr); +		ret = arch_clkdm->clkdm_add_wkdep(clkdm1, clkdm2);  	} + +	return ret;  } -/* - * _clkdm_add_autodeps - remove auto sleepdeps/wkdeps from clkdm - * @clkdm: struct clockdomain * +/** + * _clkdm_del_wkdep - remove a wakeup dep from clkdm2 to clkdm1 (lockless) + * @clkdm1: wake this struct clockdomain * up (dependent) + * @clkdm2: when this struct clockdomain * wakes up (source)   * - * Remove the "autodep" sleep & wakeup dependencies from clockdomain 'clkdm' - * in hardware-supervised mode.  Meant to be called from clock framework - * when a clock inside clockdomain 'clkdm' is disabled.  No return value. + * Remove a wakeup dependency causing @clkdm1 to wake up when @clkdm2 + * wakes up.  Returns -EINVAL if presented with invalid clockdomain + * pointers, -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or + * 0 upon success. 
+ */ +static int _clkdm_del_wkdep(struct clockdomain *clkdm1, +			    struct clockdomain *clkdm2) +{ +	struct clkdm_dep *cd; +	int ret = 0; + +	if (!clkdm1 || !clkdm2) +		return -EINVAL; + +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); +	if (IS_ERR(cd)) +		ret = PTR_ERR(cd); + +	if (!arch_clkdm || !arch_clkdm->clkdm_del_wkdep) +		ret = -EINVAL; + +	if (ret) { +		pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", +			 clkdm1->name, clkdm2->name); +		return ret; +	} + +	cd->wkdep_usecount--; +	if (cd->wkdep_usecount == 0) { +		pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n", +			 clkdm1->name, clkdm2->name); + +		ret = arch_clkdm->clkdm_del_wkdep(clkdm1, clkdm2); +	} + +	return ret; +} + +/** + * _clkdm_add_sleepdep - add a sleep dependency from clkdm2 to clkdm1 (lockless) + * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) + * @clkdm2: when this struct clockdomain * is active (source)   * - * XXX autodeps are deprecated and should be removed at the earliest - * opportunity + * Prevent @clkdm1 from automatically going inactive (and then to + * retention or off) if @clkdm2 is active.  Returns -EINVAL if + * presented with invalid clockdomain pointers or called on a machine + * that does not support software-configurable hardware sleep + * dependencies, -ENOENT if the specified dependency cannot be set in + * hardware, or 0 upon success.   */ -void _clkdm_del_autodeps(struct clockdomain *clkdm) +static int _clkdm_add_sleepdep(struct clockdomain *clkdm1, +			       struct clockdomain *clkdm2)  { -	struct clkdm_autodep *autodep; +	struct clkdm_dep *cd; +	int ret = 0; -	if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) -		return; +	if (!clkdm1 || !clkdm2) +		return -EINVAL; -	for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { -		if (IS_ERR(autodep->clkdm.ptr)) -			continue; +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); +	if (IS_ERR(cd)) +		ret = PTR_ERR(cd); -		pr_debug("clockdomain: %s: removing %s sleepdep/wkdep\n", -			 clkdm->name, autodep->clkdm.ptr->name); +	if (!arch_clkdm || !arch_clkdm->clkdm_add_sleepdep) +		ret = -EINVAL; -		clkdm_del_sleepdep(clkdm, autodep->clkdm.ptr); -		clkdm_del_wkdep(clkdm, autodep->clkdm.ptr); +	if (ret) { +		pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", +			 clkdm1->name, clkdm2->name); +		return ret; +	} + +	cd->sleepdep_usecount++; +	if (cd->sleepdep_usecount == 1) { +		pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n", +			 clkdm1->name, clkdm2->name); + +		ret = arch_clkdm->clkdm_add_sleepdep(clkdm1, clkdm2);  	} + +	return ret;  }  /** - * _resolve_clkdm_deps() - resolve clkdm_names in @clkdm_deps to clkdms - * @clkdm: clockdomain that we are resolving dependencies for - * @clkdm_deps: ptr to array of struct clkdm_deps to resolve + * _clkdm_del_sleepdep - remove a sleep dep from clkdm2 to clkdm1 (lockless) + * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) + * @clkdm2: when this struct clockdomain * is active (source)   * - * Iterates through @clkdm_deps, looking up the struct clockdomain named by - * clkdm_name and storing the clockdomain pointer in the struct clkdm_dep. - * No return value. + * Allow @clkdm1 to automatically go inactive (and then to retention or + * off), independent of the activity state of @clkdm2.  
Returns -EINVAL + * if presented with invalid clockdomain pointers or called on a machine + * that does not support software-configurable hardware sleep dependencies, + * -ENOENT if the specified dependency cannot be cleared in hardware, or + * 0 upon success.   */ -static void _resolve_clkdm_deps(struct clockdomain *clkdm, -				struct clkdm_dep *clkdm_deps) +static int _clkdm_del_sleepdep(struct clockdomain *clkdm1, +			       struct clockdomain *clkdm2)  {  	struct clkdm_dep *cd; +	int ret = 0; -	for (cd = clkdm_deps; cd && cd->clkdm_name; cd++) { -		if (cd->clkdm) -			continue; -		cd->clkdm = _clkdm_lookup(cd->clkdm_name); +	if (!clkdm1 || !clkdm2) +		return -EINVAL; -		WARN(!cd->clkdm, "clockdomain: %s: could not find clkdm %s while resolving dependencies - should never happen", -		     clkdm->name, cd->clkdm_name); +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); +	if (IS_ERR(cd)) +		ret = PTR_ERR(cd); + +	if (!arch_clkdm || !arch_clkdm->clkdm_del_sleepdep) +		ret = -EINVAL; + +	if (ret) { +		pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", +			 clkdm1->name, clkdm2->name); +		return ret;  	} + +	cd->sleepdep_usecount--; +	if (cd->sleepdep_usecount == 0) { +		pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n", +			 clkdm1->name, clkdm2->name); + +		ret = arch_clkdm->clkdm_del_sleepdep(clkdm1, clkdm2); +	} + +	return ret;  }  /* Public functions */ @@ -456,30 +572,18 @@ struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm)  int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  {  	struct clkdm_dep *cd; -	int ret = 0; +	int ret;  	if (!clkdm1 || !clkdm2)  		return -EINVAL;  	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);  	if (IS_ERR(cd)) -		ret = PTR_ERR(cd); +		return PTR_ERR(cd); -	if (!arch_clkdm || !arch_clkdm->clkdm_add_wkdep) -		ret = -EINVAL; - -	if (ret) { -		pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", -			 clkdm1->name, clkdm2->name); -		return ret; -	} - -	if (atomic_inc_return(&cd->wkdep_usecount) == 1) { -		pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n", -			 clkdm1->name, clkdm2->name); - -		ret = arch_clkdm->clkdm_add_wkdep(clkdm1, clkdm2); -	} +	pwrdm_lock(cd->clkdm->pwrdm.ptr); +	ret = _clkdm_add_wkdep(clkdm1, clkdm2); +	pwrdm_unlock(cd->clkdm->pwrdm.ptr);  	return ret;  } @@ -497,30 +601,18 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  {  	struct clkdm_dep *cd; -	int ret = 0; +	int ret;  	if (!clkdm1 || !clkdm2)  		return -EINVAL;  	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);  	if (IS_ERR(cd)) -		ret = PTR_ERR(cd); +		return PTR_ERR(cd); -	if (!arch_clkdm || !arch_clkdm->clkdm_del_wkdep) -		ret = -EINVAL; - -	if (ret) { -		pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", -			 clkdm1->name, clkdm2->name); -		return ret; -	} - -	if (atomic_dec_return(&cd->wkdep_usecount) == 0) { -		pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n", -			 clkdm1->name, clkdm2->name); - -		ret = arch_clkdm->clkdm_del_wkdep(clkdm1, clkdm2); -	} +	pwrdm_lock(cd->clkdm->pwrdm.ptr); +	ret = _clkdm_del_wkdep(clkdm1, clkdm2); +	pwrdm_unlock(cd->clkdm->pwrdm.ptr);  	return ret;  } @@ -560,7 +652,7 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  		return ret;  	} -	/* XXX It's faster to return the atomic 
wkdep_usecount */ +	/* XXX It's faster to return the wkdep_usecount */  	return arch_clkdm->clkdm_read_wkdep(clkdm1, clkdm2);  } @@ -600,30 +692,18 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)  int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  {  	struct clkdm_dep *cd; -	int ret = 0; +	int ret;  	if (!clkdm1 || !clkdm2)  		return -EINVAL; -	cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);  	if (IS_ERR(cd)) -		ret = PTR_ERR(cd); +		return PTR_ERR(cd); -	if (!arch_clkdm || !arch_clkdm->clkdm_add_sleepdep) -		ret = -EINVAL; - -	if (ret) { -		pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", -			 clkdm1->name, clkdm2->name); -		return ret; -	} - -	if (atomic_inc_return(&cd->sleepdep_usecount) == 1) { -		pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n", -			 clkdm1->name, clkdm2->name); - -		ret = arch_clkdm->clkdm_add_sleepdep(clkdm1, clkdm2); -	} +	pwrdm_lock(cd->clkdm->pwrdm.ptr); +	ret = _clkdm_add_sleepdep(clkdm1, clkdm2); +	pwrdm_unlock(cd->clkdm->pwrdm.ptr);  	return ret;  } @@ -643,30 +723,18 @@ int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  {  	struct clkdm_dep *cd; -	int ret = 0; +	int ret;  	if (!clkdm1 || !clkdm2)  		return -EINVAL; -	cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); +	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);  	if (IS_ERR(cd)) -		ret = PTR_ERR(cd); +		return PTR_ERR(cd); -	if (!arch_clkdm || !arch_clkdm->clkdm_del_sleepdep) -		ret = -EINVAL; - -	if (ret) { -		pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", -			 clkdm1->name, clkdm2->name); -		return ret; -	} - -	if (atomic_dec_return(&cd->sleepdep_usecount) == 0) { -		pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n", -			 clkdm1->name, clkdm2->name); - -		ret = arch_clkdm->clkdm_del_sleepdep(clkdm1, clkdm2); -	} +	pwrdm_lock(cd->clkdm->pwrdm.ptr); +	ret = _clkdm_del_sleepdep(clkdm1, clkdm2); +	pwrdm_unlock(cd->clkdm->pwrdm.ptr);  	return ret;  } @@ -708,7 +776,7 @@ int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)  		return ret;  	} -	/* XXX It's faster to return the atomic sleepdep_usecount */ +	/* XXX It's faster to return the sleepdep_usecount */  	return arch_clkdm->clkdm_read_sleepdep(clkdm1, clkdm2);  } @@ -734,18 +802,17 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)  }  /** - * clkdm_sleep - force clockdomain sleep transition + * clkdm_sleep_nolock - force clockdomain sleep transition (lockless)   * @clkdm: struct clockdomain *   *   * Instruct the CM to force a sleep transition on the specified - * clockdomain @clkdm.  Returns -EINVAL if @clkdm is NULL or if - * clockdomain does not support software-initiated sleep; 0 upon - * success. + * clockdomain @clkdm.  Only for use by the powerdomain code.  Returns + * -EINVAL if @clkdm is NULL or if clockdomain does not support + * software-initiated sleep; 0 upon success.   
*/ -int clkdm_sleep(struct clockdomain *clkdm) +int clkdm_sleep_nolock(struct clockdomain *clkdm)  {  	int ret; -	unsigned long flags;  	if (!clkdm)  		return -EINVAL; @@ -761,26 +828,45 @@ int clkdm_sleep(struct clockdomain *clkdm)  	pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name); -	spin_lock_irqsave(&clkdm->lock, flags);  	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;  	ret = arch_clkdm->clkdm_sleep(clkdm); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	ret |= pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +  	return ret;  }  /** - * clkdm_wakeup - force clockdomain wakeup transition + * clkdm_sleep - force clockdomain sleep transition   * @clkdm: struct clockdomain *   * - * Instruct the CM to force a wakeup transition on the specified - * clockdomain @clkdm.  Returns -EINVAL if @clkdm is NULL or if the - * clockdomain does not support software-controlled wakeup; 0 upon + * Instruct the CM to force a sleep transition on the specified + * clockdomain @clkdm.  Returns -EINVAL if @clkdm is NULL or if + * clockdomain does not support software-initiated sleep; 0 upon   * success.   */ -int clkdm_wakeup(struct clockdomain *clkdm) +int clkdm_sleep(struct clockdomain *clkdm) +{ +	int ret; + +	pwrdm_lock(clkdm->pwrdm.ptr); +	ret = clkdm_sleep_nolock(clkdm); +	pwrdm_unlock(clkdm->pwrdm.ptr); + +	return ret; +} + +/** + * clkdm_wakeup_nolock - force clockdomain wakeup transition (lockless) + * @clkdm: struct clockdomain * + * + * Instruct the CM to force a wakeup transition on the specified + * clockdomain @clkdm.  Only for use by the powerdomain code.  Returns + * -EINVAL if @clkdm is NULL or if the clockdomain does not support + * software-controlled wakeup; 0 upon success. + */ +int clkdm_wakeup_nolock(struct clockdomain *clkdm)  {  	int ret; -	unsigned long flags;  	if (!clkdm)  		return -EINVAL; @@ -796,28 +882,46 @@ int clkdm_wakeup(struct clockdomain *clkdm)  	pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name); -	spin_lock_irqsave(&clkdm->lock, flags);  	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;  	ret = arch_clkdm->clkdm_wakeup(clkdm); -	ret |= pwrdm_state_switch(clkdm->pwrdm.ptr); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	ret |= pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +  	return ret;  }  /** - * clkdm_allow_idle - enable hwsup idle transitions for clkdm + * clkdm_wakeup - force clockdomain wakeup transition   * @clkdm: struct clockdomain *   * - * Allow the hardware to automatically switch the clockdomain @clkdm into - * active or idle states, as needed by downstream clocks.  If the + * Instruct the CM to force a wakeup transition on the specified + * clockdomain @clkdm.  Returns -EINVAL if @clkdm is NULL or if the + * clockdomain does not support software-controlled wakeup; 0 upon + * success. + */ +int clkdm_wakeup(struct clockdomain *clkdm) +{ +	int ret; + +	pwrdm_lock(clkdm->pwrdm.ptr); +	ret = clkdm_wakeup_nolock(clkdm); +	pwrdm_unlock(clkdm->pwrdm.ptr); + +	return ret; +} + +/** + * clkdm_allow_idle_nolock - enable hwsup idle transitions for clkdm + * @clkdm: struct clockdomain * + * + * Allow the hardware to automatically switch the clockdomain @clkdm + * into active or idle states, as needed by downstream clocks.  If the   * clockdomain has any downstream clocks enabled in the clock   * framework, wkdep/sleepdep autodependencies are added; this is so - * device drivers can read and write to the device.  No return value. + * device drivers can read and write to the device.  Only for use by + * the powerdomain code.  No return value.   
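A pattern worth noting before the remaining conversions: each public entry point is now split into a *_nolock() variant that does the work and assumes the caller (typically the powerdomain code) already holds pwrdm_lock(), plus a thin wrapper that only brackets it with pwrdm_lock()/pwrdm_unlock(). A minimal userspace sketch of that split, with a pthread mutex standing in for the per-powerdomain lock (names invented for the illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pwrdm_lock_model = PTHREAD_MUTEX_INITIALIZER;
static int clkdm_active;

/* "nolock" variant: caller must already hold pwrdm_lock_model */
static int model_wakeup_nolock(void)
{
        clkdm_active = 1;       /* stands in for the arch_clkdm wakeup call */
        return 0;
}

/* locked wrapper: what external callers use */
static int model_wakeup(void)
{
        int ret;

        pthread_mutex_lock(&pwrdm_lock_model);
        ret = model_wakeup_nolock();
        pthread_mutex_unlock(&pwrdm_lock_model);

        return ret;
}

int main(void)
{
        if (model_wakeup())
                return 1;
        printf("clockdomain model is %s\n", clkdm_active ? "active" : "idle");
        return 0;
}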
*/ -void clkdm_allow_idle(struct clockdomain *clkdm) +void clkdm_allow_idle_nolock(struct clockdomain *clkdm)  { -	unsigned long flags; -  	if (!clkdm)  		return; @@ -833,11 +937,26 @@ void clkdm_allow_idle(struct clockdomain *clkdm)  	pr_debug("clockdomain: enabling automatic idle transitions for %s\n",  		 clkdm->name); -	spin_lock_irqsave(&clkdm->lock, flags);  	clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;  	arch_clkdm->clkdm_allow_idle(clkdm); -	pwrdm_state_switch(clkdm->pwrdm.ptr); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +} + +/** + * clkdm_allow_idle - enable hwsup idle transitions for clkdm + * @clkdm: struct clockdomain * + * + * Allow the hardware to automatically switch the clockdomain @clkdm into + * active or idle states, as needed by downstream clocks.  If the + * clockdomain has any downstream clocks enabled in the clock + * framework, wkdep/sleepdep autodependencies are added; this is so + * device drivers can read and write to the device.  No return value. + */ +void clkdm_allow_idle(struct clockdomain *clkdm) +{ +	pwrdm_lock(clkdm->pwrdm.ptr); +	clkdm_allow_idle_nolock(clkdm); +	pwrdm_unlock(clkdm->pwrdm.ptr);  }  /** @@ -847,12 +966,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)   * Prevent the hardware from automatically switching the clockdomain   * @clkdm into inactive or idle states.  If the clockdomain has   * downstream clocks enabled in the clock framework, wkdep/sleepdep - * autodependencies are removed.  No return value. + * autodependencies are removed.  Only for use by the powerdomain + * code.  No return value.   */ -void clkdm_deny_idle(struct clockdomain *clkdm) +void clkdm_deny_idle_nolock(struct clockdomain *clkdm)  { -	unsigned long flags; -  	if (!clkdm)  		return; @@ -868,11 +986,25 @@ void clkdm_deny_idle(struct clockdomain *clkdm)  	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",  		 clkdm->name); -	spin_lock_irqsave(&clkdm->lock, flags);  	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;  	arch_clkdm->clkdm_deny_idle(clkdm); -	pwrdm_state_switch(clkdm->pwrdm.ptr); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +} + +/** + * clkdm_deny_idle - disable hwsup idle transitions for clkdm + * @clkdm: struct clockdomain * + * + * Prevent the hardware from automatically switching the clockdomain + * @clkdm into inactive or idle states.  If the clockdomain has + * downstream clocks enabled in the clock framework, wkdep/sleepdep + * autodependencies are removed.  No return value. + */ +void clkdm_deny_idle(struct clockdomain *clkdm) +{ +	pwrdm_lock(clkdm->pwrdm.ptr); +	clkdm_deny_idle_nolock(clkdm); +	pwrdm_unlock(clkdm->pwrdm.ptr);  }  /** @@ -889,14 +1021,11 @@ void clkdm_deny_idle(struct clockdomain *clkdm)  bool clkdm_in_hwsup(struct clockdomain *clkdm)  {  	bool ret; -	unsigned long flags;  	if (!clkdm)  		return false; -	spin_lock_irqsave(&clkdm->lock, flags);  	ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false; -	spin_unlock_irqrestore(&clkdm->lock, flags);  	return ret;  } @@ -918,30 +1047,91 @@ bool clkdm_missing_idle_reporting(struct clockdomain *clkdm)  	return (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) ? true : false;  } +/* Public autodep handling functions (deprecated) */ + +/** + * clkdm_add_autodeps - add auto sleepdeps/wkdeps to clkdm upon clock enable + * @clkdm: struct clockdomain * + * + * Add the "autodep" sleep & wakeup dependencies to clockdomain 'clkdm' + * in hardware-supervised mode.  
Meant to be called from clock framework + * when a clock inside clockdomain 'clkdm' is enabled.	No return value. + * + * XXX autodeps are deprecated and should be removed at the earliest + * opportunity + */ +void clkdm_add_autodeps(struct clockdomain *clkdm) +{ +	struct clkdm_autodep *autodep; + +	if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) +		return; + +	for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { +		if (IS_ERR(autodep->clkdm.ptr)) +			continue; + +		pr_debug("clockdomain: %s: adding %s sleepdep/wkdep\n", +			 clkdm->name, autodep->clkdm.ptr->name); + +		_clkdm_add_sleepdep(clkdm, autodep->clkdm.ptr); +		_clkdm_add_wkdep(clkdm, autodep->clkdm.ptr); +	} +} + +/** + * clkdm_del_autodeps - remove auto sleepdeps/wkdeps from clkdm + * @clkdm: struct clockdomain * + * + * Remove the "autodep" sleep & wakeup dependencies from clockdomain 'clkdm' + * in hardware-supervised mode.  Meant to be called from clock framework + * when a clock inside clockdomain 'clkdm' is disabled.  No return value. + * + * XXX autodeps are deprecated and should be removed at the earliest + * opportunity + */ +void clkdm_del_autodeps(struct clockdomain *clkdm) +{ +	struct clkdm_autodep *autodep; + +	if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) +		return; + +	for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { +		if (IS_ERR(autodep->clkdm.ptr)) +			continue; + +		pr_debug("clockdomain: %s: removing %s sleepdep/wkdep\n", +			 clkdm->name, autodep->clkdm.ptr->name); + +		_clkdm_del_sleepdep(clkdm, autodep->clkdm.ptr); +		_clkdm_del_wkdep(clkdm, autodep->clkdm.ptr); +	} +} +  /* Clockdomain-to-clock/hwmod framework interface code */  static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)  { -	unsigned long flags; -  	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)  		return -EINVAL; -	spin_lock_irqsave(&clkdm->lock, flags); +	pwrdm_lock(clkdm->pwrdm.ptr);  	/*  	 * For arch's with no autodeps, clkcm_clk_enable  	 * should be called for every clock instance or hwmod that is  	 * enabled, so the clkdm can be force woken up.  	 
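The renamed clkdm_add_autodeps()/clkdm_del_autodeps() above now call the lockless _clkdm_* helpers directly, so they are assumed to run with the relevant powerdomain lock already held by their callers in the clkdm_clk_enable()/allow_idle() paths. A toy model of walking the terminated autodeps array (the clockdomain names are placeholders, not taken from the patch):

#include <stdio.h>

/* stand-in for the NULL-terminated autodeps[] array in the patch */
static const char *model_autodeps[] = { "mpu_clkdm", "iva_clkdm", NULL };

static void model_add_autodeps(const char *clkdm_name)
{
        const char **dep;

        /* add both a sleepdep and a wkdep for every autodep entry */
        for (dep = model_autodeps; *dep; dep++)
                printf("%s: adding %s sleepdep/wkdep\n", clkdm_name, *dep);
}

int main(void)
{
        model_add_autodeps("per_clkdm");
        return 0;
}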
*/ -	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) { -		spin_unlock_irqrestore(&clkdm->lock, flags); +	clkdm->usecount++; +	if (clkdm->usecount > 1 && autodeps) { +		pwrdm_unlock(clkdm->pwrdm.ptr);  		return 0;  	}  	arch_clkdm->clkdm_clk_enable(clkdm); -	pwrdm_state_switch(clkdm->pwrdm.ptr); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +	pwrdm_unlock(clkdm->pwrdm.ptr);  	pr_debug("clockdomain: %s: enabled\n", clkdm->name); @@ -990,36 +1180,34 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)   */  int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)  { -	unsigned long flags; -  	if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)  		return -EINVAL; -	spin_lock_irqsave(&clkdm->lock, flags); +	pwrdm_lock(clkdm->pwrdm.ptr);  	/* corner case: disabling unused clocks */ -	if ((__clk_get_enable_count(clk) == 0) && -	    (atomic_read(&clkdm->usecount) == 0)) +	if ((__clk_get_enable_count(clk) == 0) && clkdm->usecount == 0)  		goto ccd_exit; -	if (atomic_read(&clkdm->usecount) == 0) { -		spin_unlock_irqrestore(&clkdm->lock, flags); +	if (clkdm->usecount == 0) { +		pwrdm_unlock(clkdm->pwrdm.ptr);  		WARN_ON(1); /* underflow */  		return -ERANGE;  	} -	if (atomic_dec_return(&clkdm->usecount) > 0) { -		spin_unlock_irqrestore(&clkdm->lock, flags); +	clkdm->usecount--; +	if (clkdm->usecount > 0) { +		pwrdm_unlock(clkdm->pwrdm.ptr);  		return 0;  	}  	arch_clkdm->clkdm_clk_disable(clkdm); -	pwrdm_state_switch(clkdm->pwrdm.ptr); +	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr);  	pr_debug("clockdomain: %s: disabled\n", clkdm->name);  ccd_exit: -	spin_unlock_irqrestore(&clkdm->lock, flags); +	pwrdm_unlock(clkdm->pwrdm.ptr);  	return 0;  } @@ -1072,8 +1260,6 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)   */  int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)  { -	unsigned long flags; -  	/* The clkdm attribute does not exist yet prior OMAP4 */  	if (cpu_is_omap24xx() || cpu_is_omap34xx())  		return 0; @@ -1086,22 +1272,23 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)  	if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)  		return -EINVAL; -	spin_lock_irqsave(&clkdm->lock, flags); +	pwrdm_lock(clkdm->pwrdm.ptr); -	if (atomic_read(&clkdm->usecount) == 0) { -		spin_unlock_irqrestore(&clkdm->lock, flags); +	if (clkdm->usecount == 0) { +		pwrdm_unlock(clkdm->pwrdm.ptr);  		WARN_ON(1); /* underflow */  		return -ERANGE;  	} -	if (atomic_dec_return(&clkdm->usecount) > 0) { -		spin_unlock_irqrestore(&clkdm->lock, flags); +	clkdm->usecount--; +	if (clkdm->usecount > 0) { +		pwrdm_unlock(clkdm->pwrdm.ptr);  		return 0;  	}  	arch_clkdm->clkdm_clk_disable(clkdm); -	pwrdm_state_switch(clkdm->pwrdm.ptr); -	spin_unlock_irqrestore(&clkdm->lock, flags); +	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); +	pwrdm_unlock(clkdm->pwrdm.ptr);  	pr_debug("clockdomain: %s: disabled\n", clkdm->name); diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h index bc42446e23a..2da37656a69 100644 --- a/arch/arm/mach-omap2/clockdomain.h +++ b/arch/arm/mach-omap2/clockdomain.h @@ -15,7 +15,6 @@  #define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAIN_H  #include <linux/init.h> -#include <linux/spinlock.h>  #include "powerdomain.h"  #include "clock.h" @@ -92,8 +91,8 @@ struct clkdm_autodep {  struct clkdm_dep {  	const char *clkdm_name;  	struct clockdomain *clkdm; -	atomic_t wkdep_usecount; -	atomic_t sleepdep_usecount; +	
s16 wkdep_usecount; +	s16 sleepdep_usecount;  };  /* Possible flags for struct clockdomain._flags */ @@ -137,9 +136,8 @@ struct clockdomain {  	const u16 clkdm_offs;  	struct clkdm_dep *wkdep_srcs;  	struct clkdm_dep *sleepdep_srcs; -	atomic_t usecount; +	int usecount;  	struct list_head node; -	spinlock_t lock;  };  /** @@ -196,12 +194,16 @@ int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);  int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);  int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm); +void clkdm_allow_idle_nolock(struct clockdomain *clkdm);  void clkdm_allow_idle(struct clockdomain *clkdm); +void clkdm_deny_idle_nolock(struct clockdomain *clkdm);  void clkdm_deny_idle(struct clockdomain *clkdm);  bool clkdm_in_hwsup(struct clockdomain *clkdm);  bool clkdm_missing_idle_reporting(struct clockdomain *clkdm); +int clkdm_wakeup_nolock(struct clockdomain *clkdm);  int clkdm_wakeup(struct clockdomain *clkdm); +int clkdm_sleep_nolock(struct clockdomain *clkdm);  int clkdm_sleep(struct clockdomain *clkdm);  int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk); @@ -214,8 +216,9 @@ extern void __init omap243x_clockdomains_init(void);  extern void __init omap3xxx_clockdomains_init(void);  extern void __init am33xx_clockdomains_init(void);  extern void __init omap44xx_clockdomains_init(void); -extern void _clkdm_add_autodeps(struct clockdomain *clkdm); -extern void _clkdm_del_autodeps(struct clockdomain *clkdm); + +extern void clkdm_add_autodeps(struct clockdomain *clkdm); +extern void clkdm_del_autodeps(struct clockdomain *clkdm);  extern struct clkdm_ops omap2_clkdm_operations;  extern struct clkdm_ops omap3_clkdm_operations; diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c index db650690e9d..6774a53a387 100644 --- a/arch/arm/mach-omap2/cm2xxx.c +++ b/arch/arm/mach-omap2/cm2xxx.c @@ -273,9 +273,6 @@ int omap2xxx_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)  static void omap2xxx_clkdm_allow_idle(struct clockdomain *clkdm)  { -	if (atomic_read(&clkdm->usecount) > 0) -		_clkdm_add_autodeps(clkdm); -  	omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  				       clkdm->clktrctrl_mask);  } @@ -284,9 +281,6 @@ static void omap2xxx_clkdm_deny_idle(struct clockdomain *clkdm)  {  	omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					clkdm->clktrctrl_mask); - -	if (atomic_read(&clkdm->usecount) > 0) -		_clkdm_del_autodeps(clkdm);  }  static int omap2xxx_clkdm_clk_enable(struct clockdomain *clkdm) @@ -298,18 +292,8 @@ static int omap2xxx_clkdm_clk_enable(struct clockdomain *clkdm)  	hwsup = omap2xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					      clkdm->clktrctrl_mask); - -	if (hwsup) { -		/* Disable HW transitions when we are changing deps */ -		omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs, -						clkdm->clktrctrl_mask); -		_clkdm_add_autodeps(clkdm); -		omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, -					       clkdm->clktrctrl_mask); -	} else { -		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) -			omap2xxx_clkdm_wakeup(clkdm); -	} +	if (!hwsup && clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) +		omap2xxx_clkdm_wakeup(clkdm);  	return 0;  } @@ -324,17 +308,8 @@ static int omap2xxx_clkdm_clk_disable(struct clockdomain *clkdm)  	hwsup = omap2xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					      clkdm->clktrctrl_mask); -	if (hwsup) { -		/* Disable HW transitions when we are changing deps */ -		
omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs, -						clkdm->clktrctrl_mask); -		_clkdm_del_autodeps(clkdm); -		omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs, -					       clkdm->clktrctrl_mask); -	} else { -		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP) -			omap2xxx_clkdm_sleep(clkdm); -	} +	if (!hwsup && clkdm->flags & CLKDM_CAN_FORCE_SLEEP) +		omap2xxx_clkdm_sleep(clkdm);  	return 0;  } diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c index 058ce3c0873..325a5157657 100644 --- a/arch/arm/mach-omap2/cm33xx.c +++ b/arch/arm/mach-omap2/cm33xx.c @@ -241,9 +241,6 @@ int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)  {  	int i = 0; -	if (!clkctrl_offs) -		return 0; -  	omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),  			  MAX_MODULE_READY_TIME, i); diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h index 5fa0b62e1a7..64f4bafe7bd 100644 --- a/arch/arm/mach-omap2/cm33xx.h +++ b/arch/arm/mach-omap2/cm33xx.h @@ -17,16 +17,11 @@  #ifndef __ARCH_ARM_MACH_OMAP2_CM_33XX_H  #define __ARCH_ARM_MACH_OMAP2_CM_33XX_H -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/io.h> -  #include "common.h"  #include "cm.h"  #include "cm-regbits-33xx.h" -#include "cm33xx.h" +#include "iomap.h"  /* CM base address */  #define AM33XX_CM_BASE		0x44e00000 @@ -381,6 +376,7 @@  #define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL			AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0020) +#ifndef __ASSEMBLER__  extern bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);  extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs);  extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs); @@ -417,4 +413,5 @@ static inline int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs,  }  #endif +#endif /* ASSEMBLER */  #endif diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c index c2086f2e86b..9061c307d91 100644 --- a/arch/arm/mach-omap2/cm3xxx.c +++ b/arch/arm/mach-omap2/cm3xxx.c @@ -186,7 +186,7 @@ static int omap3xxx_clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)  			continue; /* only happens if data is erroneous */  		mask |= 1 << cd->clkdm->dep_bit; -		atomic_set(&cd->sleepdep_usecount, 0); +		cd->sleepdep_usecount = 0;  	}  	omap2_cm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,  				    OMAP3430_CM_SLEEPDEP); @@ -209,8 +209,8 @@ static int omap3xxx_clkdm_wakeup(struct clockdomain *clkdm)  static void omap3xxx_clkdm_allow_idle(struct clockdomain *clkdm)  { -	if (atomic_read(&clkdm->usecount) > 0) -		_clkdm_add_autodeps(clkdm); +	if (clkdm->usecount > 0) +		clkdm_add_autodeps(clkdm);  	omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  				       clkdm->clktrctrl_mask); @@ -221,8 +221,8 @@ static void omap3xxx_clkdm_deny_idle(struct clockdomain *clkdm)  	omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					clkdm->clktrctrl_mask); -	if (atomic_read(&clkdm->usecount) > 0) -		_clkdm_del_autodeps(clkdm); +	if (clkdm->usecount > 0) +		clkdm_del_autodeps(clkdm);  }  static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm) @@ -250,7 +250,7 @@ static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)  		/* Disable HW transitions when we are changing deps */  		omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  						clkdm->clktrctrl_mask); -		_clkdm_add_autodeps(clkdm); +		clkdm_add_autodeps(clkdm);  		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					       clkdm->clktrctrl_mask);  	} else { @@ 
-287,7 +287,7 @@ static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)  		/* Disable HW transitions when we are changing deps */  		omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  						clkdm->clktrctrl_mask); -		_clkdm_del_autodeps(clkdm); +		clkdm_del_autodeps(clkdm);  		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,  					       clkdm->clktrctrl_mask);  	} else { diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c index 7f9a464f01e..f0290f5566f 100644 --- a/arch/arm/mach-omap2/cminst44xx.c +++ b/arch/arm/mach-omap2/cminst44xx.c @@ -393,7 +393,7 @@ static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm)  			continue; /* only happens if data is erroneous */  		mask |= 1 << cd->clkdm->dep_bit; -		atomic_set(&cd->wkdep_usecount, 0); +		cd->wkdep_usecount = 0;  	}  	omap4_cminst_clear_inst_reg_bits(mask, clkdm->prcm_partition, diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 948bcaa82eb..0c3a991a240 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -119,6 +119,14 @@ static inline void omap2xxx_restart(char mode, const char *cmd)  }  #endif +#ifdef CONFIG_SOC_AM33XX +void am33xx_restart(char mode, const char *cmd); +#else +static inline void am33xx_restart(char mode, const char *cmd) +{ +} +#endif +  #ifdef CONFIG_ARCH_OMAP3  void omap3xxx_restart(char mode, const char *cmd);  #else diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 22590dbe8f1..80392fca86c 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -36,40 +36,66 @@  /* Mach specific information to be recorded in the C-state driver_data */  struct omap3_idle_statedata { -	u32 mpu_state; -	u32 core_state; +	u8 mpu_state; +	u8 core_state; +	u8 per_min_state; +	u8 flags;  };  static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd; +/* + * Possible flag bits for struct omap3_idle_statedata.flags: + * + * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go + *    inactive.  This in turn prevents the MPU DPLL from entering autoidle + *    mode, so wakeup latency is greatly reduced, at the cost of additional + *    energy consumption.  This also prevents the CORE clockdomain from + *    entering idle. + */ +#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0) + +/* + * Prevent PER OFF if CORE is not in RETention or OFF as this would + * disable PER wakeups completely. 
+ */  static struct omap3_idle_statedata omap3_idle_data[] = {  	{  		.mpu_state = PWRDM_POWER_ON,  		.core_state = PWRDM_POWER_ON, +		/* In C1 do not allow PER state lower than CORE state */ +		.per_min_state = PWRDM_POWER_ON, +		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,  	},  	{  		.mpu_state = PWRDM_POWER_ON,  		.core_state = PWRDM_POWER_ON, +		.per_min_state = PWRDM_POWER_RET,  	},  	{  		.mpu_state = PWRDM_POWER_RET,  		.core_state = PWRDM_POWER_ON, +		.per_min_state = PWRDM_POWER_RET,  	},  	{  		.mpu_state = PWRDM_POWER_OFF,  		.core_state = PWRDM_POWER_ON, +		.per_min_state = PWRDM_POWER_RET,  	},  	{  		.mpu_state = PWRDM_POWER_RET,  		.core_state = PWRDM_POWER_RET, +		.per_min_state = PWRDM_POWER_OFF,  	},  	{  		.mpu_state = PWRDM_POWER_OFF,  		.core_state = PWRDM_POWER_RET, +		.per_min_state = PWRDM_POWER_OFF,  	},  	{  		.mpu_state = PWRDM_POWER_OFF,  		.core_state = PWRDM_POWER_OFF, +		.per_min_state = PWRDM_POWER_OFF,  	},  }; @@ -80,27 +106,25 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,  				int index)  {  	struct omap3_idle_statedata *cx = &omap3_idle_data[index]; -	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;  	local_fiq_disable(); -	pwrdm_set_next_pwrst(mpu_pd, mpu_state); -	pwrdm_set_next_pwrst(core_pd, core_state); -  	if (omap_irq_pending() || need_resched())  		goto return_sleep_time;  	/* Deny idle for C1 */ -	if (index == 0) { +	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {  		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]); -		clkdm_deny_idle(core_pd->pwrdm_clkdms[0]); +	} else { +		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state); +		pwrdm_set_next_pwrst(core_pd, cx->core_state);  	}  	/*  	 * Call idle CPU PM enter notifier chain so that  	 * VFP context is saved.  	 */ -	if (mpu_state == PWRDM_POWER_OFF) +	if (cx->mpu_state == PWRDM_POWER_OFF)  		cpu_pm_enter();  	/* Execute ARM wfi */ @@ -110,17 +134,15 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,  	 * Call idle CPU PM enter notifier chain to restore  	 * VFP context.  	 */ -	if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF) +	if (cx->mpu_state == PWRDM_POWER_OFF && +	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)  		cpu_pm_exit();  	/* Re-allow idle for C1 */ -	if (index == 0) { +	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)  		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]); -		clkdm_allow_idle(core_pd->pwrdm_clkdms[0]); -	}  return_sleep_time: -  	local_fiq_enable();  	return index; @@ -185,7 +207,7 @@ static int next_valid_state(struct cpuidle_device *dev,  	 * Start search from the next (lower) state.  	 */  	for (idx = index - 1; idx >= 0; idx--) { -		cx =  &omap3_idle_data[idx]; +		cx = &omap3_idle_data[idx];  		if ((cx->mpu_state >= mpu_deepest_state) &&  		    (cx->core_state >= core_deepest_state)) {  			next_index = idx; @@ -209,10 +231,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,  			       struct cpuidle_driver *drv,  			       int index)  { -	int new_state_idx; -	u32 core_next_state, per_next_state = 0, per_saved_state = 0; +	int new_state_idx, ret; +	u8 per_next_state, per_saved_state;  	struct omap3_idle_statedata *cx; -	int ret;  	/*  	 * Use only C1 if CAM is active. 
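The reworked C-state table above replaces the old index-based special cases: OMAP_CPUIDLE_CX_NO_CLKDM_IDLE marks the C1-like state that must keep the MPU clockdomain out of idle, and per_min_state records the deepest PER power state each C-state tolerates, which the omap3_enter_idle_bm() hunk below simply clamps against. Because the OMAP power-state constants order numerically with OFF below RET below ON, the clamp is a one-line comparison; a self-contained sketch (the constants and names here are stand-ins, not the kernel definitions):

#include <stdio.h>

/* numeric ordering mirrors the OMAP powerdomain constants: lower = deeper */
enum model_pwrst { MODEL_OFF = 0, MODEL_RET = 1, MODEL_ON = 3 };

struct model_cstate {
        enum model_pwrst core_state;
        enum model_pwrst per_min_state;
};

/* clamp the PER target the same way the new hunk below does */
static enum model_pwrst model_per_target(enum model_pwrst per_next,
                                         const struct model_cstate *cx)
{
        if (per_next < cx->per_min_state)
                per_next = cx->per_min_state;
        return per_next;
}

int main(void)
{
        /* C1-like state: PER may not go below ON */
        const struct model_cstate c1 = { .core_state = MODEL_ON,
                                         .per_min_state = MODEL_ON };
        /* deep state: CORE in RET, PER allowed all the way down to OFF */
        const struct model_cstate deep = { .core_state = MODEL_RET,
                                           .per_min_state = MODEL_OFF };

        printf("C1:   PER OFF request -> %d\n", model_per_target(MODEL_OFF, &c1));
        printf("deep: PER OFF request -> %d\n", model_per_target(MODEL_OFF, &deep));
        return 0;
}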
@@ -233,25 +254,13 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,  	/* Program PER state */  	cx = &omap3_idle_data[new_state_idx]; -	core_next_state = cx->core_state; -	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); -	if (new_state_idx == 0) { -		/* In C1 do not allow PER state lower than CORE state */ -		if (per_next_state < core_next_state) -			per_next_state = core_next_state; -	} else { -		/* -		 * Prevent PER OFF if CORE is not in RETention or OFF as this -		 * would disable PER wakeups completely. -		 */ -		if ((per_next_state == PWRDM_POWER_OFF) && -		    (core_next_state > PWRDM_POWER_RET)) -			per_next_state = PWRDM_POWER_RET; -	} -	/* Are we changing PER target state? */ -	if (per_next_state != per_saved_state) +	per_next_state = pwrdm_read_next_pwrst(per_pd); +	per_saved_state = per_next_state; +	if (per_next_state < cx->per_min_state) { +		per_next_state = cx->per_min_state;  		pwrdm_set_next_pwrst(per_pd, per_next_state); +	}  	ret = omap3_enter_idle(dev, drv, new_state_idx); diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 626f3ea3142..6ecc89adda8 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -61,8 +61,7 @@ static int __init omap3_l3_init(void)  	if (!oh)  		pr_err("could not look up %s\n", oh_name); -	pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0, -							   NULL, 0, 0); +	pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0);  	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); @@ -96,8 +95,7 @@ static int __init omap4_l3_init(void)  			pr_err("could not look up %s\n", oh_name);  	} -	pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, -						     0, NULL, 0, 0); +	pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, 0);  	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); @@ -273,7 +271,7 @@ int __init omap4_keyboard_init(struct omap4_keypad_platform_data  	keypad_data = sdp4430_keypad_data;  	pdev = omap_device_build(name, id, oh, keypad_data, -			sizeof(struct omap4_keypad_platform_data), NULL, 0, 0); +				 sizeof(struct omap4_keypad_platform_data));  	if (IS_ERR(pdev)) {  		WARN(1, "Can't build omap_device for %s:%s.\n", @@ -297,7 +295,7 @@ static inline void __init omap_init_mbox(void)  		return;  	} -	pdev = omap_device_build("omap-mailbox", -1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build("omap-mailbox", -1, oh, NULL, 0);  	WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",  						__func__, PTR_ERR(pdev));  } @@ -337,7 +335,7 @@ static void __init omap_init_mcpdm(void)  		return;  	} -	pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0);  	WARN(IS_ERR(pdev), "Can't build omap_device for omap-mcpdm.\n");  }  #else @@ -358,7 +356,7 @@ static void __init omap_init_dmic(void)  		return;  	} -	pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0);  	WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n");  }  #else @@ -384,8 +382,7 @@ static void __init omap_init_hdmi_audio(void)  		return;  	} -	pdev = omap_device_build("omap-hdmi-audio-dai", -		-1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);  	WARN(IS_ERR(pdev),  	     "Can't build omap_device for omap-hdmi-audio-dai.\n"); @@ -429,8 +426,7 @@ static int __init omap_mcspi_init(struct omap_hwmod *oh, void *unused)  	}  	spi_num++; -	pdev = 
omap_device_build(name, spi_num, oh, pdata, -				sizeof(*pdata),	NULL, 0, 0); +	pdev = omap_device_build(name, spi_num, oh, pdata, sizeof(*pdata));  	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s\n",  				name, oh->name);  	kfree(pdata); @@ -460,7 +456,7 @@ static void omap_init_rng(void)  	if (!oh)  		return; -	pdev = omap_device_build("omap_rng", -1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build("omap_rng", -1, oh, NULL, 0);  	WARN(IS_ERR(pdev), "Can't build omap_device for omap_rng\n");  } @@ -689,8 +685,7 @@ static void __init omap_init_ocp2scp(void)  	pdata->dev_cnt	= dev_cnt; -	pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL, -								0, false); +	pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata));  	if (IS_ERR(pdev)) {  		pr_err("Could not build omap_device for %s %s\n",  						name, oh_name); diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index cc75aaf6e76..ff37be1f6f9 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -226,7 +226,7 @@ static struct platform_device *create_dss_pdev(const char *pdev_name,  		dev_set_name(&pdev->dev, "%s", pdev->name);  	ohs[0] = oh; -	od = omap_device_alloc(pdev, ohs, 1, NULL, 0); +	od = omap_device_alloc(pdev, ohs, 1);  	if (IS_ERR(od)) {  		pr_err("Could not alloc omap_device for %s\n", pdev_name);  		r = -ENOMEM; diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index 612b9824987..491c5c8837f 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c @@ -248,7 +248,7 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)  	p->errata		= configure_dma_errata(); -	pdev = omap_device_build(name, 0, oh, p, sizeof(*p), NULL, 0, 0); +	pdev = omap_device_build(name, 0, oh, p, sizeof(*p));  	kfree(p);  	if (IS_ERR(pdev)) {  		pr_err("%s: Can't build omap_device for %s:%s.\n", diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 0a02aab5df6..3aed4b0b956 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -500,8 +500,9 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,  		if (dd->last_rounded_rate == 0)  			return -EINVAL; -		/* No freqsel on OMAP4 and OMAP3630 */ -		if (!cpu_is_omap44xx() && !cpu_is_omap3630()) { +		/* No freqsel on AM335x, OMAP4 and OMAP3630 */ +		if (!soc_is_am33xx() && !cpu_is_omap44xx() && +		    !cpu_is_omap3630()) {  			freqsel = _omap3_dpll_compute_freqsel(clk,  						dd->last_rounded_n);  			WARN_ON(!freqsel); diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c index 2a2cfa88ddb..4d8d1a52ffe 100644 --- a/arch/arm/mach-omap2/drm.c +++ b/arch/arm/mach-omap2/drm.c @@ -51,8 +51,7 @@ static int __init omap_init_drm(void)  	oh = omap_hwmod_lookup("dmm");  	if (oh) { -		pdev = omap_device_build(oh->name, -1, oh, NULL, 0, NULL, 0, -					false); +		pdev = omap_device_build(oh->name, -1, oh, NULL, 0);  		WARN(IS_ERR(pdev), "Could not build omap_device for %s\n",  			oh->name);  	} diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c index 399acabc3d0..482ade1923b 100644 --- a/arch/arm/mach-omap2/gpio.c +++ b/arch/arm/mach-omap2/gpio.c @@ -131,8 +131,7 @@ static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)  	pwrdm = omap_hwmod_get_pwrdm(oh);  	pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm); -	pdev = omap_device_build(name, id - 1, oh, pdata, -				sizeof(*pdata),	NULL, 0, false); +	pdev = omap_device_build(name, id - 1, oh, 
pdata, sizeof(*pdata));  	kfree(pdata);  	if (IS_ERR(pdev)) { diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 8033cb747c8..bc0783364ad 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1220,7 +1220,7 @@ static int __init omap_gpmc_init(void)  		return -ENODEV;  	} -	pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0);  	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);  	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; diff --git a/arch/arm/mach-omap2/hdq1w.c b/arch/arm/mach-omap2/hdq1w.c index ab7bf181a10..b7aa8ba2ccb 100644 --- a/arch/arm/mach-omap2/hdq1w.c +++ b/arch/arm/mach-omap2/hdq1w.c @@ -87,7 +87,7 @@ static int __init omap_init_hdq(void)  	if (!oh)  		return 0; -	pdev = omap_device_build(devname, id, oh, NULL, 0, NULL, 0, 0); +	pdev = omap_device_build(devname, id, oh, NULL, 0);  	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",  	     devname, oh->name); diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 4a964338992..2ef1f8714fc 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -522,7 +522,7 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,  	}  	dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); -	od = omap_device_alloc(pdev, ohs, 1, NULL, 0); +	od = omap_device_alloc(pdev, ohs, 1);  	if (IS_ERR(od)) {  		pr_err("Could not allocate od for %s\n", name);  		goto put_pdev; diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c index 1df9b5feda1..c3688903f3d 100644 --- a/arch/arm/mach-omap2/hwspinlock.c +++ b/arch/arm/mach-omap2/hwspinlock.c @@ -46,8 +46,7 @@ static int __init hwspinlocks_init(void)  		return -EINVAL;  	pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata, -				sizeof(struct hwspinlock_pdata), -				NULL, 0, false); +				sizeof(struct hwspinlock_pdata));  	if (IS_ERR(pdev)) {  		pr_err("Can't build omap_device for %s:%s\n", dev_name,  								oh_name); diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c index b9074dde3b9..c11a23fa966 100644 --- a/arch/arm/mach-omap2/i2c.c +++ b/arch/arm/mach-omap2/i2c.c @@ -178,8 +178,7 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,  	if (cpu_is_omap34xx())  		pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;  	pdev = omap_device_build(name, bus_id, oh, pdata, -			sizeof(struct omap_i2c_bus_platform_data), -			NULL, 0, 0); +				 sizeof(struct omap_i2c_bus_platform_data));  	WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);  	return PTR_RET(pdev); diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 45cc7ed4dd5..8a68f1ec66b 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -399,8 +399,18 @@ void __init omap3xxx_check_revision(void)  		}  		break;  	case 0xb944: -		omap_revision = AM335X_REV_ES1_0; -		cpu_rev = "1.0"; +		switch (rev) { +		case 0: +			omap_revision = AM335X_REV_ES1_0; +			cpu_rev = "1.0"; +			break; +		case 1: +		/* FALLTHROUGH */ +		default: +			omap_revision = AM335X_REV_ES2_0; +			cpu_rev = "2.0"; +			break; +		}  		break;  	case 0xb8f2:  		switch (rev) { diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c index df49f2a4946..453580410ae 100644 --- a/arch/arm/mach-omap2/mcbsp.c +++ b/arch/arm/mach-omap2/mcbsp.c @@ -101,7 +101,7 @@ static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)  		count++;  	} 
 	pdev = omap_device_build_ss(name, id, oh_device, count, pdata, -				sizeof(*pdata), NULL, 0, false); +				    sizeof(*pdata));  	kfree(pdata);  	if (IS_ERR(pdev))  {  		pr_err("%s: Can't build omap_device for %s:%s.\n", __func__, diff --git a/arch/arm/mach-omap2/msdi.c b/arch/arm/mach-omap2/msdi.c index aafdd4ca9f4..c52d8b4a3e9 100644 --- a/arch/arm/mach-omap2/msdi.c +++ b/arch/arm/mach-omap2/msdi.c @@ -150,7 +150,7 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)  		return;  	}  	pdev = omap_device_build(dev_name, id, oh, mmc_data[0], -				 sizeof(struct omap_mmc_platform_data), NULL, 0, 0); +				 sizeof(struct omap_mmc_platform_data));  	if (IS_ERR(pdev))  		WARN(1, "Can'd build omap_device for %s:%s.\n",  					dev_name, oh->name); diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index 6da4f7ae9d7..f7f38c7fd5f 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -41,8 +41,7 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)  		pdata->deassert_reset = omap_device_deassert_hardreset;  	} -	pdev = omap_device_build("omap-iommu", i, oh, pdata, sizeof(*pdata), -				NULL, 0, 0); +	pdev = omap_device_build("omap-iommu", i, oh, pdata, sizeof(*pdata));  	kfree(pdata); diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index aac46bfdbeb..8bcb64bcdcd 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -87,37 +87,6 @@ static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)  }  /* - * Set the CPUx powerdomain's previous power state - */ -static inline void set_cpu_next_pwrst(unsigned int cpu_id, -				unsigned int power_state) -{ -	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); - -	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); -} - -/* - * Read CPU's previous power state - */ -static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id) -{ -	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); - -	return pwrdm_read_prev_pwrst(pm_info->pwrdm); -} - -/* - * Clear the CPUx powerdomain's previous power state - */ -static inline void clear_cpu_prev_pwrst(unsigned int cpu_id) -{ -	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); - -	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); -} - -/*   * Store the SCU power status value to scratchpad memory   */  static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) @@ -230,6 +199,7 @@ static void save_l2x0_context(void)   */  int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)  { +	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);  	unsigned int save_state = 0;  	unsigned int wakeup_cpu; @@ -268,7 +238,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)  		save_state = 2;  	cpu_clear_prev_logic_pwrst(cpu); -	set_cpu_next_pwrst(cpu, power_state); +	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);  	set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));  	scu_pwrst_prepare(cpu, power_state);  	l2x0_pwrst_prepare(cpu, save_state); @@ -286,7 +256,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)  	 * domain transition  	 */  	wakeup_cpu = smp_processor_id(); -	set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON); +	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);  	pwrdm_post_transition(NULL); @@ -300,8 +270,8 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int 
power_state)   */  int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)  { -	unsigned int cpu_state = 0;  	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); +	unsigned int cpu_state = 0;  	if (omap_rev() == OMAP4430_REV_ES1_0)  		return -ENXIO; @@ -309,8 +279,8 @@ int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)  	if (power_state == PWRDM_POWER_OFF)  		cpu_state = 1; -	clear_cpu_prev_pwrst(cpu); -	set_cpu_next_pwrst(cpu, power_state); +	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); +	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);  	set_cpu_wakeup_addr(cpu, virt_to_phys(pm_info->secondary_startup));  	scu_pwrst_prepare(cpu, power_state); @@ -321,7 +291,7 @@ int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)  	 */  	omap4_finish_suspend(cpu_state); -	set_cpu_next_pwrst(cpu, PWRDM_POWER_ON); +	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);  	return 0;  } diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index e065daa537c..6ee3ad3dd95 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -17,68 +17,15 @@   * to control power management and interconnect properties of their   * devices.   * - * In the medium- to long-term, this code should either be - * a) implemented via arch-specific pointers in platform_data - * or - * b) implemented as a proper omap_bus/omap_device in Linux, no more - *    platform_data func pointers + * In the medium- to long-term, this code should be implemented as a + * proper omap_bus/omap_device in Linux, no more platform_data func + * pointers   *   * - * Guidelines for usage by driver authors: - * - * 1. These functions are intended to be used by device drivers via - * function pointers in struct platform_data.  As an example, - * omap_device_enable() should be passed to the driver as - * - * struct foo_driver_platform_data { - * ... - *      int (*device_enable)(struct platform_device *pdev); - * ... - * } - * - * Note that the generic "device_enable" name is used, rather than - * "omap_device_enable".  This is so other architectures can pass in their - * own enable/disable functions here. - * - * This should be populated during device setup: - * - * ... - * pdata->device_enable = omap_device_enable; - * ... - * - * 2. Drivers should first check to ensure the function pointer is not null - * before calling it, as in: - * - * if (pdata->device_enable) - *     pdata->device_enable(pdev); - * - * This allows other architectures that don't use similar device_enable()/ - * device_shutdown() functions to execute normally. - * - * ... 
- * - * Suggested usage by device drivers: - * - * During device initialization: - * device_enable() - * - * During device idle: - * (save remaining device context if necessary) - * device_idle(); - * - * During device resume: - * device_enable(); - * (restore context if necessary) - * - * During device shutdown: - * device_shutdown() - * (device must be reinitialized at this point to use it again) - *   */  #undef DEBUG  #include <linux/kernel.h> -#include <linux/export.h>  #include <linux/platform_device.h>  #include <linux/slab.h>  #include <linux/err.h> @@ -92,155 +39,8 @@  #include "omap_device.h"  #include "omap_hwmod.h" -/* These parameters are passed to _omap_device_{de,}activate() */ -#define USE_WAKEUP_LAT			0 -#define IGNORE_WAKEUP_LAT		1 - -static int omap_early_device_register(struct platform_device *pdev); - -static struct omap_device_pm_latency omap_default_latency[] = { -	{ -		.deactivate_func = omap_device_idle_hwmods, -		.activate_func   = omap_device_enable_hwmods, -		.flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, -	} -}; -  /* Private functions */ -/** - * _omap_device_activate - increase device readiness - * @od: struct omap_device * - * @ignore_lat: increase to latency target (0) or full readiness (1)? - * - * Increase readiness of omap_device @od (thus decreasing device - * wakeup latency, but consuming more power).  If @ignore_lat is - * IGNORE_WAKEUP_LAT, make the omap_device fully active.  Otherwise, - * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup - * latency is greater than the requested maximum wakeup latency, step - * backwards in the omap_device_pm_latency table to ensure the - * device's maximum wakeup latency is less than or equal to the - * requested maximum wakeup latency.  Returns 0. - */ -static int _omap_device_activate(struct omap_device *od, u8 ignore_lat) -{ -	struct timespec a, b, c; - -	dev_dbg(&od->pdev->dev, "omap_device: activating\n"); - -	while (od->pm_lat_level > 0) { -		struct omap_device_pm_latency *odpl; -		unsigned long long act_lat = 0; - -		od->pm_lat_level--; - -		odpl = od->pm_lats + od->pm_lat_level; - -		if (!ignore_lat && -		    (od->dev_wakeup_lat <= od->_dev_wakeup_lat_limit)) -			break; - -		read_persistent_clock(&a); - -		/* XXX check return code */ -		odpl->activate_func(od); - -		read_persistent_clock(&b); - -		c = timespec_sub(b, a); -		act_lat = timespec_to_ns(&c); - -		dev_dbg(&od->pdev->dev, -			"omap_device: pm_lat %d: activate: elapsed time %llu nsec\n", -			od->pm_lat_level, act_lat); - -		if (act_lat > odpl->activate_lat) { -			odpl->activate_lat_worst = act_lat; -			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) { -				odpl->activate_lat = act_lat; -				dev_dbg(&od->pdev->dev, -					"new worst case activate latency %d: %llu\n", -					od->pm_lat_level, act_lat); -			} else -				dev_warn(&od->pdev->dev, -					 "activate latency %d higher than expected. (%llu > %d)\n", -					 od->pm_lat_level, act_lat, -					 odpl->activate_lat); -		} - -		od->dev_wakeup_lat -= odpl->activate_lat; -	} - -	return 0; -} - -/** - * _omap_device_deactivate - decrease device readiness - * @od: struct omap_device * - * @ignore_lat: decrease to latency target (0) or full inactivity (1)? - * - * Decrease readiness of omap_device @od (thus increasing device - * wakeup latency, but conserving power).  If @ignore_lat is - * IGNORE_WAKEUP_LAT, make the omap_device fully inactive.  
Otherwise, - * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup - * latency is less than the requested maximum wakeup latency, step - * forwards in the omap_device_pm_latency table to ensure the device's - * maximum wakeup latency is less than or equal to the requested - * maximum wakeup latency.  Returns 0. - */ -static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat) -{ -	struct timespec a, b, c; - -	dev_dbg(&od->pdev->dev, "omap_device: deactivating\n"); - -	while (od->pm_lat_level < od->pm_lats_cnt) { -		struct omap_device_pm_latency *odpl; -		unsigned long long deact_lat = 0; - -		odpl = od->pm_lats + od->pm_lat_level; - -		if (!ignore_lat && -		    ((od->dev_wakeup_lat + odpl->activate_lat) > -		     od->_dev_wakeup_lat_limit)) -			break; - -		read_persistent_clock(&a); - -		/* XXX check return code */ -		odpl->deactivate_func(od); - -		read_persistent_clock(&b); - -		c = timespec_sub(b, a); -		deact_lat = timespec_to_ns(&c); - -		dev_dbg(&od->pdev->dev, -			"omap_device: pm_lat %d: deactivate: elapsed time %llu nsec\n", -			od->pm_lat_level, deact_lat); - -		if (deact_lat > odpl->deactivate_lat) { -			odpl->deactivate_lat_worst = deact_lat; -			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) { -				odpl->deactivate_lat = deact_lat; -				dev_dbg(&od->pdev->dev, -					"new worst case deactivate latency %d: %llu\n", -					od->pm_lat_level, deact_lat); -			} else -				dev_warn(&od->pdev->dev, -					 "deactivate latency %d higher than expected. (%llu > %d)\n", -					 od->pm_lat_level, deact_lat, -					 odpl->deactivate_lat); -		} - -		od->dev_wakeup_lat += odpl->activate_lat; - -		od->pm_lat_level++; -	} - -	return 0; -} -  static void _add_clkdev(struct omap_device *od, const char *clk_alias,  		       const char *clk_name)  { @@ -315,9 +115,6 @@ static void _add_hwmod_clocks_clkdev(struct omap_device *od,   * @oh: ptr to the single omap_hwmod that backs this omap_device   * @pdata: platform_data ptr to associate with the platform_device   * @pdata_len: amount of memory pointed to by @pdata - * @pm_lats: pointer to a omap_device_pm_latency array for this device - * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats - * @is_early_device: should the device be registered as an early device or not   *   * Function for building an omap_device already registered from device-tree   * @@ -356,7 +153,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)  		hwmods[i] = oh;  	} -	od = omap_device_alloc(pdev, hwmods, oh_cnt, NULL, 0); +	od = omap_device_alloc(pdev, hwmods, oh_cnt);  	if (!od) {  		dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",  			oh_name); @@ -407,6 +204,39 @@ static int _omap_device_notifier_call(struct notifier_block *nb,  	return NOTIFY_DONE;  } +/** + * _omap_device_enable_hwmods - call omap_hwmod_enable() on all hwmods + * @od: struct omap_device *od + * + * Enable all underlying hwmods.  Returns 0. + */ +static int _omap_device_enable_hwmods(struct omap_device *od) +{ +	int i; + +	for (i = 0; i < od->hwmods_cnt; i++) +		omap_hwmod_enable(od->hwmods[i]); + +	/* XXX pass along return value here? */ +	return 0; +} + +/** + * _omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods + * @od: struct omap_device *od + * + * Idle all underlying hwmods.  Returns 0. + */ +static int _omap_device_idle_hwmods(struct omap_device *od) +{ +	int i; + +	for (i = 0; i < od->hwmods_cnt; i++) +		omap_hwmod_idle(od->hwmods[i]); + +	/* XXX pass along return value here? 
*/ +	return 0; +}  /* Public functions for use by core code */ @@ -526,18 +356,14 @@ static int _od_fill_dma_resources(struct omap_device *od,   * @oh: ptr to the single omap_hwmod that backs this omap_device   * @pdata: platform_data ptr to associate with the platform_device   * @pdata_len: amount of memory pointed to by @pdata - * @pm_lats: pointer to a omap_device_pm_latency array for this device - * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats   *   * Convenience function for allocating an omap_device structure and filling - * hwmods, resources and pm_latency attributes. + * hwmods, and resources.   *   * Returns an struct omap_device pointer or ERR_PTR() on error;   */  struct omap_device *omap_device_alloc(struct platform_device *pdev, -					struct omap_hwmod **ohs, int oh_cnt, -					struct omap_device_pm_latency *pm_lats, -					int pm_lats_cnt) +					struct omap_hwmod **ohs, int oh_cnt)  {  	int ret = -ENOMEM;  	struct omap_device *od; @@ -626,18 +452,6 @@ struct omap_device *omap_device_alloc(struct platform_device *pdev,  		goto oda_exit3;  have_everything: -	if (!pm_lats) { -		pm_lats = omap_default_latency; -		pm_lats_cnt = ARRAY_SIZE(omap_default_latency); -	} - -	od->pm_lats_cnt = pm_lats_cnt; -	od->pm_lats = kmemdup(pm_lats, -			sizeof(struct omap_device_pm_latency) * pm_lats_cnt, -			GFP_KERNEL); -	if (!od->pm_lats) -		goto oda_exit3; -  	pdev->archdata.od = od;  	for (i = 0; i < oh_cnt; i++) { @@ -663,7 +477,6 @@ void omap_device_delete(struct omap_device *od)  		return;  	od->pdev->archdata.od = NULL; -	kfree(od->pm_lats);  	kfree(od->hwmods);  	kfree(od);  } @@ -675,9 +488,6 @@ void omap_device_delete(struct omap_device *od)   * @oh: ptr to the single omap_hwmod that backs this omap_device   * @pdata: platform_data ptr to associate with the platform_device   * @pdata_len: amount of memory pointed to by @pdata - * @pm_lats: pointer to a omap_device_pm_latency array for this device - * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats - * @is_early_device: should the device be registered as an early device or not   *   * Convenience function for building and registering a single   * omap_device record, which in turn builds and registers a @@ -685,11 +495,10 @@ void omap_device_delete(struct omap_device *od)   * information.  Returns ERR_PTR(-EINVAL) if @oh is NULL; otherwise,   * passes along the return value of omap_device_build_ss().   
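With the pm_lats and is_early_device parameters gone, omap_device_build() and omap_device_build_ss() take just the device name, id, backing hwmod(s), platform data and its length, as the converted call sites earlier in the patch show and as the new prototypes just below declare. A call-site fragment in that style (the device and hwmod names are placeholders, not taken from the patch):

#include "omap_device.h"        /* as in the mach-omap2 call sites above */
#include "omap_hwmod.h"

static int __init example_device_init(void)
{
        struct omap_hwmod *oh;
        struct platform_device *pdev;

        oh = omap_hwmod_lookup("mcspi1");       /* placeholder hwmod name */
        if (!oh)
                return -ENODEV;

        /* five arguments now: name, id, hwmod, pdata, pdata_len */
        pdev = omap_device_build("omap2_mcspi", 1, oh, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        return 0;
}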
*/ -struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id, -				      struct omap_hwmod *oh, void *pdata, -				      int pdata_len, -				      struct omap_device_pm_latency *pm_lats, -				      int pm_lats_cnt, int is_early_device) +struct platform_device __init *omap_device_build(const char *pdev_name, +						 int pdev_id, +						 struct omap_hwmod *oh, +						 void *pdata, int pdata_len)  {  	struct omap_hwmod *ohs[] = { oh }; @@ -697,8 +506,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev  		return ERR_PTR(-EINVAL);  	return omap_device_build_ss(pdev_name, pdev_id, ohs, 1, pdata, -				    pdata_len, pm_lats, pm_lats_cnt, -				    is_early_device); +				    pdata_len);  }  /** @@ -708,9 +516,6 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev   * @oh: ptr to the single omap_hwmod that backs this omap_device   * @pdata: platform_data ptr to associate with the platform_device   * @pdata_len: amount of memory pointed to by @pdata - * @pm_lats: pointer to a omap_device_pm_latency array for this device - * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats - * @is_early_device: should the device be registered as an early device or not   *   * Convenience function for building and registering an omap_device   * subsystem record.  Subsystem records consist of multiple @@ -718,11 +523,11 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev   * platform_device record.  Returns an ERR_PTR() on error, or passes   * along the return value of omap_device_register().   */ -struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id, -					 struct omap_hwmod **ohs, int oh_cnt, -					 void *pdata, int pdata_len, -					 struct omap_device_pm_latency *pm_lats, -					 int pm_lats_cnt, int is_early_device) +struct platform_device __init *omap_device_build_ss(const char *pdev_name, +						    int pdev_id, +						    struct omap_hwmod **ohs, +						    int oh_cnt, void *pdata, +						    int pdata_len)  {  	int ret = -ENOMEM;  	struct platform_device *pdev; @@ -746,7 +551,7 @@ struct platform_device __init *omap_device_build_ss(const char *pdev_name, int p  	else  		dev_set_name(&pdev->dev, "%s", pdev->name); -	od = omap_device_alloc(pdev, ohs, oh_cnt, pm_lats, pm_lats_cnt); +	od = omap_device_alloc(pdev, ohs, oh_cnt);  	if (IS_ERR(od))  		goto odbs_exit1; @@ -754,10 +559,7 @@ struct platform_device __init *omap_device_build_ss(const char *pdev_name, int p  	if (ret)  		goto odbs_exit2; -	if (is_early_device) -		ret = omap_early_device_register(pdev); -	else -		ret = omap_device_register(pdev); +	ret = omap_device_register(pdev);  	if (ret)  		goto odbs_exit2; @@ -774,24 +576,6 @@ odbs_exit:  	return ERR_PTR(ret);  } -/** - * omap_early_device_register - register an omap_device as an early platform - * device. - * @od: struct omap_device * to register - * - * Register the omap_device structure.  This currently just calls - * platform_early_add_device() on the underlying platform_device. - * Returns 0 by default. - */ -static int __init omap_early_device_register(struct platform_device *pdev) -{ -	struct platform_device *devices[1]; - -	devices[0] = pdev; -	early_platform_add_devices(devices, 1); -	return 0; -} -  #ifdef CONFIG_PM_RUNTIME  static int _od_runtime_suspend(struct device *dev)  { @@ -902,10 +686,9 @@ int omap_device_register(struct platform_device *pdev)   * to be accessible and ready to operate.  
This generally involves   * enabling clocks, setting SYSCONFIG registers; and in the future may   * involve remuxing pins.  Device drivers should call this function - * (through platform_data function pointers) where they would normally - * enable clocks, etc.  Returns -EINVAL if called when the omap_device - * is already enabled, or passes along the return value of - * _omap_device_activate(). + * indirectly via pm_runtime_get*().  Returns -EINVAL if called when + * the omap_device is already enabled, or passes along the return + * value of _omap_device_enable_hwmods().   */  int omap_device_enable(struct platform_device *pdev)  { @@ -921,14 +704,8 @@ int omap_device_enable(struct platform_device *pdev)  		return -EINVAL;  	} -	/* Enable everything if we're enabling this device from scratch */ -	if (od->_state == OMAP_DEVICE_STATE_UNKNOWN) -		od->pm_lat_level = od->pm_lats_cnt; - -	ret = _omap_device_activate(od, IGNORE_WAKEUP_LAT); +	ret = _omap_device_enable_hwmods(od); -	od->dev_wakeup_lat = 0; -	od->_dev_wakeup_lat_limit = UINT_MAX;  	od->_state = OMAP_DEVICE_STATE_ENABLED;  	return ret; @@ -938,14 +715,10 @@ int omap_device_enable(struct platform_device *pdev)   * omap_device_idle - idle an omap_device   * @od: struct omap_device * to idle   * - * Idle omap_device @od by calling as many .deactivate_func() entries - * in the omap_device's pm_lats table as is possible without exceeding - * the device's maximum wakeup latency limit, pm_lat_limit.  Device - * drivers should call this function (through platform_data function - * pointers) where they would normally disable clocks after operations - * complete, etc..  Returns -EINVAL if the omap_device is not + * Idle omap_device @od.  Device drivers call this function indirectly + * via pm_runtime_put*().  Returns -EINVAL if the omap_device is not   * currently enabled, or passes along the return value of - * _omap_device_deactivate(). + * _omap_device_idle_hwmods().   */  int omap_device_idle(struct platform_device *pdev)  { @@ -961,7 +734,7 @@ int omap_device_idle(struct platform_device *pdev)  		return -EINVAL;  	} -	ret = _omap_device_deactivate(od, USE_WAKEUP_LAT); +	ret = _omap_device_idle_hwmods(od);  	od->_state = OMAP_DEVICE_STATE_IDLE; @@ -969,42 +742,6 @@ int omap_device_idle(struct platform_device *pdev)  }  /** - * omap_device_shutdown - shut down an omap_device - * @od: struct omap_device * to shut down - * - * Shut down omap_device @od by calling all .deactivate_func() entries - * in the omap_device's pm_lats table and then shutting down all of - * the underlying omap_hwmods.  Used when a device is being "removed" - * or a device driver is being unloaded.  Returns -EINVAL if the - * omap_device is not currently enabled or idle, or passes along the - * return value of _omap_device_deactivate(). 
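The reworded kernel-doc above is the behavioural point of this hunk: with the pm_lats machinery gone, drivers reach omap_device_enable()/omap_device_idle() only indirectly through runtime PM, via the omap_device PM domain's runtime callbacks (e.g. _od_runtime_suspend(), visible as context above). A hedged sketch of what a driver is expected to do, using only standard runtime-PM calls on a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_driver_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	/* resume: the omap_device PM domain ends up in omap_device_enable() */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... program the IP block's registers ... */

	/* idle: the omap_device PM domain ends up in omap_device_idle() */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}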
- */ -int omap_device_shutdown(struct platform_device *pdev) -{ -	int ret, i; -	struct omap_device *od; - -	od = to_omap_device(pdev); - -	if (od->_state != OMAP_DEVICE_STATE_ENABLED && -	    od->_state != OMAP_DEVICE_STATE_IDLE) { -		dev_warn(&pdev->dev, -			 "omap_device: %s() called from invalid state %d\n", -			 __func__, od->_state); -		return -EINVAL; -	} - -	ret = _omap_device_deactivate(od, IGNORE_WAKEUP_LAT); - -	for (i = 0; i < od->hwmods_cnt; i++) -		omap_hwmod_shutdown(od->hwmods[i]); - -	od->_state = OMAP_DEVICE_STATE_SHUTDOWN; - -	return ret; -} - -/**   * omap_device_assert_hardreset - set a device's hardreset line   * @pdev: struct platform_device * to reset   * @name: const char * name of the reset line @@ -1060,86 +797,6 @@ int omap_device_deassert_hardreset(struct platform_device *pdev,  }  /** - * omap_device_align_pm_lat - activate/deactivate device to match wakeup lat lim - * @od: struct omap_device * - * - * When a device's maximum wakeup latency limit changes, call some of - * the .activate_func or .deactivate_func function pointers in the - * omap_device's pm_lats array to ensure that the device's maximum - * wakeup latency is less than or equal to the new latency limit. - * Intended to be called by OMAP PM code whenever a device's maximum - * wakeup latency limit changes (e.g., via - * omap_pm_set_dev_wakeup_lat()).  Returns 0 if nothing needs to be - * done (e.g., if the omap_device is not currently idle, or if the - * wakeup latency is already current with the new limit) or passes - * along the return value of _omap_device_deactivate() or - * _omap_device_activate(). - */ -int omap_device_align_pm_lat(struct platform_device *pdev, -			     u32 new_wakeup_lat_limit) -{ -	int ret = -EINVAL; -	struct omap_device *od; - -	od = to_omap_device(pdev); - -	if (new_wakeup_lat_limit == od->dev_wakeup_lat) -		return 0; - -	od->_dev_wakeup_lat_limit = new_wakeup_lat_limit; - -	if (od->_state != OMAP_DEVICE_STATE_IDLE) -		return 0; -	else if (new_wakeup_lat_limit > od->dev_wakeup_lat) -		ret = _omap_device_deactivate(od, USE_WAKEUP_LAT); -	else if (new_wakeup_lat_limit < od->dev_wakeup_lat) -		ret = _omap_device_activate(od, USE_WAKEUP_LAT); - -	return ret; -} - -/** - * omap_device_get_pwrdm - return the powerdomain * associated with @od - * @od: struct omap_device * - * - * Return the powerdomain associated with the first underlying - * omap_hwmod for this omap_device.  Intended for use by core OMAP PM - * code.  Returns NULL on error or a struct powerdomain * upon - * success. - */ -struct powerdomain *omap_device_get_pwrdm(struct omap_device *od) -{ -	/* -	 * XXX Assumes that all omap_hwmod powerdomains are identical. -	 * This may not necessarily be true.  There should be a sanity -	 * check in here to WARN() if any difference appears. -	 */ -	if (!od->hwmods_cnt) -		return NULL; - -	return omap_hwmod_get_pwrdm(od->hwmods[0]); -} - -/** - * omap_device_get_mpu_rt_va - return the MPU's virtual addr for the hwmod base - * @od: struct omap_device * - * - * Return the MPU's virtual address for the base of the hwmod, from - * the ioremap() that the hwmod code does.  Only valid if there is one - * hwmod associated with this device.  Returns NULL if there are zero - * or more than one hwmods associated with this omap_device; - * otherwise, passes along the return value from - * omap_hwmod_get_mpu_rt_va(). 
- */ -void __iomem *omap_device_get_rt_va(struct omap_device *od) -{ -	if (od->hwmods_cnt != 1) -		return NULL; - -	return omap_hwmod_get_mpu_rt_va(od->hwmods[0]); -} - -/**   * omap_device_get_by_hwmod_name() - convert a hwmod name to   * device pointer.   * @oh_name: name of the hwmod device @@ -1173,82 +830,6 @@ struct device *omap_device_get_by_hwmod_name(const char *oh_name)  	return &oh->od->pdev->dev;  } -EXPORT_SYMBOL(omap_device_get_by_hwmod_name); - -/* - * Public functions intended for use in omap_device_pm_latency - * .activate_func and .deactivate_func function pointers - */ - -/** - * omap_device_enable_hwmods - call omap_hwmod_enable() on all hwmods - * @od: struct omap_device *od - * - * Enable all underlying hwmods.  Returns 0. - */ -int omap_device_enable_hwmods(struct omap_device *od) -{ -	int i; - -	for (i = 0; i < od->hwmods_cnt; i++) -		omap_hwmod_enable(od->hwmods[i]); - -	/* XXX pass along return value here? */ -	return 0; -} - -/** - * omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods - * @od: struct omap_device *od - * - * Idle all underlying hwmods.  Returns 0. - */ -int omap_device_idle_hwmods(struct omap_device *od) -{ -	int i; - -	for (i = 0; i < od->hwmods_cnt; i++) -		omap_hwmod_idle(od->hwmods[i]); - -	/* XXX pass along return value here? */ -	return 0; -} - -/** - * omap_device_disable_clocks - disable all main and interface clocks - * @od: struct omap_device *od - * - * Disable the main functional clock and interface clock for all of the - * omap_hwmods associated with the omap_device.  Returns 0. - */ -int omap_device_disable_clocks(struct omap_device *od) -{ -	int i; - -	for (i = 0; i < od->hwmods_cnt; i++) -		omap_hwmod_disable_clocks(od->hwmods[i]); - -	/* XXX pass along return value here? */ -	return 0; -} - -/** - * omap_device_enable_clocks - enable all main and interface clocks - * @od: struct omap_device *od - * - * Enable the main functional clock and interface clock for all of the - * omap_hwmods associated with the omap_device.  Returns 0. - */ -int omap_device_enable_clocks(struct omap_device *od) -{ -	int i; - -	for (i = 0; i < od->hwmods_cnt; i++) -		omap_hwmod_enable_clocks(od->hwmods[i]); - -	/* XXX pass along return value here? */ -	return 0; -}  static struct notifier_block platform_nb = {  	.notifier_call = _omap_device_notifier_call, diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h index 0933c599bf8..044c31d50e5 100644 --- a/arch/arm/mach-omap2/omap_device.h +++ b/arch/arm/mach-omap2/omap_device.h @@ -13,20 +13,12 @@   * it under the terms of the GNU General Public License version 2 as   * published by the Free Software Foundation.   * - * Eventually this type of functionality should either be - * a) implemented via arch-specific pointers in platform_device - * or - * b) implemented as a proper omap_bus/omap_device in Linux, no more - *    platform_device + * This type of functionality should be implemented as a proper + * omap_bus/omap_device in Linux.   *   * omap_device differs from omap_hwmod in that it includes external   * (e.g., board- and system-level) integration details.  omap_hwmod   * stores hardware data that is invariant for a given OMAP chip. 
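A hedged illustration of that split, using only names from this header (the helper itself is hypothetical): core code navigates from a platform_device to its omap_device wrapper, and from there to the omap_hwmods that hold the invariant hardware description.

#include <linux/platform_device.h>
#include "omap_device.h"

/* hypothetical helper: return the first hwmod backing a platform_device */
static struct omap_hwmod *example_first_hwmod(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);

	if (!od || !od->hwmods_cnt)
		return NULL;

	return od->hwmods[0];
}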
- * - * To do: - * - GPIO integration - * - regulator integration - *   */  #ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_DEVICE_H  #define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_DEVICE_H @@ -45,19 +37,14 @@ extern struct dev_pm_domain omap_device_pm_domain;  #define OMAP_DEVICE_STATE_SHUTDOWN	3  /* omap_device.flags values */ -#define OMAP_DEVICE_SUSPENDED BIT(0) -#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1) +#define OMAP_DEVICE_SUSPENDED		BIT(0) +#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND	BIT(1)  /**   * struct omap_device - omap_device wrapper for platform_devices   * @pdev: platform_device   * @hwmods: (one .. many per omap_device)   * @hwmods_cnt: ARRAY_SIZE() of @hwmods - * @pm_lats: ptr to an omap_device_pm_latency table - * @pm_lats_cnt: ARRAY_SIZE() of what is passed to @pm_lats - * @pm_lat_level: array index of the last odpl entry executed - -1 if never - * @dev_wakeup_lat: dev wakeup latency in nanoseconds - * @_dev_wakeup_lat_limit: dev wakeup latency limit in nsec - set by OMAP PM   * @_state: one of OMAP_DEVICE_STATE_* (see above)   * @flags: device flags   * @_driver_status: one of BUS_NOTIFY_*_DRIVER from <linux/device.h> @@ -71,12 +58,7 @@ extern struct dev_pm_domain omap_device_pm_domain;  struct omap_device {  	struct platform_device		*pdev;  	struct omap_hwmod		**hwmods; -	struct omap_device_pm_latency	*pm_lats; -	u32				dev_wakeup_lat; -	u32				_dev_wakeup_lat_limit;  	unsigned long			_driver_status; -	u8				pm_lats_cnt; -	s8				pm_lat_level;  	u8				hwmods_cnt;  	u8				_state;  	u8                              flags; @@ -86,36 +68,25 @@ struct omap_device {  int omap_device_enable(struct platform_device *pdev);  int omap_device_idle(struct platform_device *pdev); -int omap_device_shutdown(struct platform_device *pdev);  /* Core code interface */  struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, -				      struct omap_hwmod *oh, void *pdata, -				      int pdata_len, -				      struct omap_device_pm_latency *pm_lats, -				      int pm_lats_cnt, int is_early_device); +					  struct omap_hwmod *oh, void *pdata, +					  int pdata_len);  struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,  					 struct omap_hwmod **oh, int oh_cnt, -					 void *pdata, int pdata_len, -					 struct omap_device_pm_latency *pm_lats, -					 int pm_lats_cnt, int is_early_device); +					 void *pdata, int pdata_len);  struct omap_device *omap_device_alloc(struct platform_device *pdev, -				      struct omap_hwmod **ohs, int oh_cnt, -				      struct omap_device_pm_latency *pm_lats, -				      int pm_lats_cnt); +				      struct omap_hwmod **ohs, int oh_cnt);  void omap_device_delete(struct omap_device *od);  int omap_device_register(struct platform_device *pdev); -void __iomem *omap_device_get_rt_va(struct omap_device *od);  struct device *omap_device_get_by_hwmod_name(const char *oh_name);  /* OMAP PM interface */ -int omap_device_align_pm_lat(struct platform_device *pdev, -			     u32 new_wakeup_lat_limit); -struct powerdomain *omap_device_get_pwrdm(struct omap_device *od);  int omap_device_get_context_loss_count(struct platform_device *pdev);  /* Other */ @@ -124,40 +95,6 @@ int omap_device_assert_hardreset(struct platform_device *pdev,  				 const char *name);  int omap_device_deassert_hardreset(struct platform_device *pdev,  				 const char *name); -int omap_device_idle_hwmods(struct omap_device *od); -int omap_device_enable_hwmods(struct omap_device *od); - -int omap_device_disable_clocks(struct omap_device *od); -int 
omap_device_enable_clocks(struct omap_device *od); - -/* - * Entries should be kept in latency order ascending - * - * deact_lat is the maximum number of microseconds required to complete - * deactivate_func() at the device's slowest OPP. - * - * act_lat is the maximum number of microseconds required to complete - * activate_func() at the device's slowest OPP. - * - * This will result in some suboptimal power management decisions at fast - * OPPs, but avoids having to recompute all device power management decisions - * if the system shifts from a fast OPP to a slow OPP (in order to meet - * latency requirements). - * - * XXX should deactivate_func/activate_func() take platform_device pointers - * rather than omap_device pointers? - */ -struct omap_device_pm_latency { -	u32 deactivate_lat; -	u32 deactivate_lat_worst; -	int (*deactivate_func)(struct omap_device *od); -	u32 activate_lat; -	u32 activate_lat_worst; -	int (*activate_func)(struct omap_device *od); -	u32 flags; -}; - -#define OMAP_DEVICE_LATENCY_AUTO_ADJUST BIT(1)  /* Get omap_device pointer from platform_device pointer */  static inline struct omap_device *to_omap_device(struct platform_device *pdev) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index f37d22c597f..ffe7a69cd17 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -139,6 +139,8 @@  #include <linux/slab.h>  #include <linux/bootmem.h> +#include <asm/system_misc.h> +  #include "clock.h"  #include "omap_hwmod.h" @@ -2151,6 +2153,8 @@ static int _enable(struct omap_hwmod *oh)  	_enable_clocks(oh);  	if (soc_ops.enable_module)  		soc_ops.enable_module(oh); +	if (oh->flags & HWMOD_BLOCK_WFI) +		disable_hlt();  	if (soc_ops.update_context_lost)  		soc_ops.update_context_lost(oh); @@ -2213,6 +2217,8 @@ static int _idle(struct omap_hwmod *oh)  		_idle_sysc(oh);  	_del_initiator_dep(oh, mpu_oh); +	if (oh->flags & HWMOD_BLOCK_WFI) +		enable_hlt();  	if (soc_ops.disable_module)  		soc_ops.disable_module(oh); @@ -2321,6 +2327,8 @@ static int _shutdown(struct omap_hwmod *oh)  	if (oh->_state == _HWMOD_STATE_ENABLED) {  		_del_initiator_dep(oh, mpu_oh);  		/* XXX what about the other system initiators here? dma, dsp */ +		if (oh->flags & HWMOD_BLOCK_WFI) +			enable_hlt();  		if (soc_ops.disable_module)  			soc_ops.disable_module(oh);  		_disable_clocks(oh); @@ -3059,11 +3067,8 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,  static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,  				     struct omap_hwmod_rst_info *ohri)  { -	if (ohri->st_shift) -		pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n", -		       oh->name, ohri->name); -  	return am33xx_prm_deassert_hardreset(ohri->rst_shift, +				ohri->st_shift,  				oh->clkdm->pwrdm.ptr->prcm_offs,  				oh->prcm.omap4.rstctrl_offs,  				oh->prcm.omap4.rstst_offs); diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 6ec73cbc30c..d43d9b608ed 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -451,6 +451,14 @@ struct omap_hwmod_omap4_prcm {   *     enabled.  This prevents the hwmod code from being able to   *     enable and reset the IP block early.  XXX Eventually it should   *     be possible to query the clock framework for this information. + * HWMOD_BLOCK_WFI: Some OMAP peripherals apparently don't work + *     correctly if the MPU is allowed to go idle while the + *     peripherals are active.  
This is apparently true for the I2C on + *     OMAP2420, and also the EMAC on AM3517/3505.  It's unlikely that + *     this is really true -- we're probably not configuring something + *     correctly, or this is being abused to deal with some PM latency + *     issues -- but we're currently suffering from a shortage of + *     folks who are able to track these issues down properly.   */  #define HWMOD_SWSUP_SIDLE			(1 << 0)  #define HWMOD_SWSUP_MSTANDBY			(1 << 1) @@ -462,6 +470,7 @@ struct omap_hwmod_omap4_prcm {  #define HWMOD_CONTROL_OPT_CLKS_IN_RESET		(1 << 7)  #define HWMOD_16BIT_REG				(1 << 8)  #define HWMOD_EXT_OPT_MAIN_CLK			(1 << 9) +#define HWMOD_BLOCK_WFI				(1 << 10)  /*   * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index b5efe58c0be..6a764af6c6d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -121,7 +121,12 @@ static struct omap_hwmod omap2420_i2c1_hwmod = {  	},  	.class		= &i2c_class,  	.dev_attr	= &i2c_dev_attr, -	.flags		= HWMOD_16BIT_REG, +	/* +	 * From mach-omap2/pm24xx.c: "Putting MPU into the WFI state +	 * while a transfer is active seems to cause the I2C block to +	 * timeout. Why? Good question." +	 */ +	.flags		= (HWMOD_16BIT_REG | HWMOD_BLOCK_WFI),  };  /* I2C2 */ diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 646c14d9fdb..26eee4a556a 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -262,13 +262,15 @@ static struct omap_hwmod am33xx_wkup_m3_hwmod = {  	.name		= "wkup_m3",  	.class		= &am33xx_wkup_m3_hwmod_class,  	.clkdm_name	= "l4_wkup_aon_clkdm", -	.flags		= HWMOD_INIT_NO_RESET,	/* Keep hardreset asserted */ +	/* Keep hardreset asserted */ +	.flags		= HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,  	.mpu_irqs	= am33xx_wkup_m3_irqs,  	.main_clk	= "dpll_core_m4_div2_ck",  	.prcm		= {  		.omap4	= {  			.clkctrl_offs	= AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET,  			.rstctrl_offs	= AM33XX_RM_WKUP_RSTCTRL_OFFSET, +			.rstst_offs	= AM33XX_RM_WKUP_RSTST_OFFSET,  			.modulemode	= MODULEMODE_SWCTRL,  		},  	}, @@ -414,7 +416,6 @@ static struct omap_hwmod am33xx_adc_tsc_hwmod = {   *    - cEFUSE (doesn't fall under any ocp_if)   *    - clkdiv32k   *    - debugss - *    - ocmc ram   *    - ocp watch point   *    - aes0   *    - sha0 @@ -481,25 +482,6 @@ static struct omap_hwmod am33xx_debugss_hwmod = {  	},  }; -/* ocmcram */ -static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = { -	.name = "ocmcram", -}; - -static struct omap_hwmod am33xx_ocmcram_hwmod = { -	.name		= "ocmcram", -	.class		= &am33xx_ocmcram_hwmod_class, -	.clkdm_name	= "l3_clkdm", -	.flags		= (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET), -	.main_clk	= "l3_gclk", -	.prcm		= { -		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET, -			.modulemode	= MODULEMODE_SWCTRL, -		}, -	}, -}; -  /* ocpwp */  static struct omap_hwmod_class am33xx_ocpwp_hwmod_class = {  	.name		= "ocpwp", @@ -570,6 +552,25 @@ static struct omap_hwmod am33xx_sha0_hwmod = {  #endif +/* ocmcram */ +static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = { +	.name = "ocmcram", +}; + +static struct omap_hwmod am33xx_ocmcram_hwmod = { +	.name		= "ocmcram", +	.class		= &am33xx_ocmcram_hwmod_class, +	.clkdm_name	= "l3_clkdm", +	.flags		= (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET), +	.main_clk	= "l3_gclk", +	.prcm		= { +		.omap4	= { +			.clkctrl_offs	= 
AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET, +			.modulemode	= MODULEMODE_SWCTRL, +		}, +	}, +}; +  /* 'smartreflex' class */  static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {  	.name		= "smartreflex", @@ -783,9 +784,7 @@ static struct omap_hwmod am33xx_elm_hwmod = {  	},  }; -/* - * 'epwmss' class: ecap0,1,2,  ehrpwm0,1,2 - */ +/* pwmss  */  static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {  	.rev_offs	= 0x0,  	.sysc_offs	= 0x4, @@ -801,18 +800,23 @@ static struct omap_hwmod_class am33xx_epwmss_hwmod_class = {  	.sysc		= &am33xx_epwmss_sysc,  }; -/* ehrpwm0 */ -static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = { -	{ .name = "int", .irq = 86 + OMAP_INTC_START, }, -	{ .name = "tzint", .irq = 58 + OMAP_INTC_START, }, -	{ .irq = -1 }, +static struct omap_hwmod_class am33xx_ecap_hwmod_class = { +	.name		= "ecap",  }; -static struct omap_hwmod am33xx_ehrpwm0_hwmod = { -	.name		= "ehrpwm0", +static struct omap_hwmod_class am33xx_eqep_hwmod_class = { +	.name		= "eqep", +}; + +static struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = { +	.name		= "ehrpwm", +}; + +/* epwmss0 */ +static struct omap_hwmod am33xx_epwmss0_hwmod = { +	.name		= "epwmss0",  	.class		= &am33xx_epwmss_hwmod_class,  	.clkdm_name	= "l4ls_clkdm", -	.mpu_irqs	= am33xx_ehrpwm0_irqs,  	.main_clk	= "l4ls_gclk",  	.prcm		= {  		.omap4	= { @@ -822,63 +826,58 @@ static struct omap_hwmod am33xx_ehrpwm0_hwmod = {  	},  }; -/* ehrpwm1 */ -static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = { -	{ .name = "int", .irq = 87 + OMAP_INTC_START, }, -	{ .name = "tzint", .irq = 59 + OMAP_INTC_START, }, +/* ecap0 */ +static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = { +	{ .irq = 31 + OMAP_INTC_START, },  	{ .irq = -1 },  }; -static struct omap_hwmod am33xx_ehrpwm1_hwmod = { -	.name		= "ehrpwm1", -	.class		= &am33xx_epwmss_hwmod_class, +static struct omap_hwmod am33xx_ecap0_hwmod = { +	.name		= "ecap0", +	.class		= &am33xx_ecap_hwmod_class,  	.clkdm_name	= "l4ls_clkdm", -	.mpu_irqs	= am33xx_ehrpwm1_irqs, +	.mpu_irqs	= am33xx_ecap0_irqs,  	.main_clk	= "l4ls_gclk", -	.prcm		= { -		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET, -			.modulemode	= MODULEMODE_SWCTRL, -		}, -	},  }; -/* ehrpwm2 */ -static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = { -	{ .name = "int", .irq = 39 + OMAP_INTC_START, }, -	{ .name = "tzint", .irq = 60 + OMAP_INTC_START, }, +/* eqep0 */ +static struct omap_hwmod_irq_info am33xx_eqep0_irqs[] = { +	{ .irq = 79 + OMAP_INTC_START, },  	{ .irq = -1 },  }; -static struct omap_hwmod am33xx_ehrpwm2_hwmod = { -	.name		= "ehrpwm2", -	.class		= &am33xx_epwmss_hwmod_class, +static struct omap_hwmod am33xx_eqep0_hwmod = { +	.name		= "eqep0", +	.class		= &am33xx_eqep_hwmod_class,  	.clkdm_name	= "l4ls_clkdm", -	.mpu_irqs	= am33xx_ehrpwm2_irqs, +	.mpu_irqs	= am33xx_eqep0_irqs,  	.main_clk	= "l4ls_gclk", -	.prcm		= { -		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET, -			.modulemode	= MODULEMODE_SWCTRL, -		}, -	},  }; -/* ecap0 */ -static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = { -	{ .irq = 31 + OMAP_INTC_START, }, +/* ehrpwm0 */ +static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = { +	{ .name = "int", .irq = 86 + OMAP_INTC_START, }, +	{ .name = "tzint", .irq = 58 + OMAP_INTC_START, },  	{ .irq = -1 },  }; -static struct omap_hwmod am33xx_ecap0_hwmod = { -	.name		= "ecap0", +static struct omap_hwmod am33xx_ehrpwm0_hwmod = { +	.name		= "ehrpwm0", +	.class		= &am33xx_ehrpwm_hwmod_class, +	.clkdm_name	= "l4ls_clkdm", +	.mpu_irqs	= 
am33xx_ehrpwm0_irqs, +	.main_clk	= "l4ls_gclk", +}; + +/* epwmss1 */ +static struct omap_hwmod am33xx_epwmss1_hwmod = { +	.name		= "epwmss1",  	.class		= &am33xx_epwmss_hwmod_class,  	.clkdm_name	= "l4ls_clkdm", -	.mpu_irqs	= am33xx_ecap0_irqs,  	.main_clk	= "l4ls_gclk",  	.prcm		= {  		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET, +			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,  			.modulemode	= MODULEMODE_SWCTRL,  		},  	}, @@ -892,13 +891,50 @@ static struct omap_hwmod_irq_info am33xx_ecap1_irqs[] = {  static struct omap_hwmod am33xx_ecap1_hwmod = {  	.name		= "ecap1", -	.class		= &am33xx_epwmss_hwmod_class, +	.class		= &am33xx_ecap_hwmod_class,  	.clkdm_name	= "l4ls_clkdm",  	.mpu_irqs	= am33xx_ecap1_irqs,  	.main_clk	= "l4ls_gclk", +}; + +/* eqep1 */ +static struct omap_hwmod_irq_info am33xx_eqep1_irqs[] = { +	{ .irq = 88 + OMAP_INTC_START, }, +	{ .irq = -1 }, +}; + +static struct omap_hwmod am33xx_eqep1_hwmod = { +	.name		= "eqep1", +	.class		= &am33xx_eqep_hwmod_class, +	.clkdm_name	= "l4ls_clkdm", +	.mpu_irqs	= am33xx_eqep1_irqs, +	.main_clk	= "l4ls_gclk", +}; + +/* ehrpwm1 */ +static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = { +	{ .name = "int", .irq = 87 + OMAP_INTC_START, }, +	{ .name = "tzint", .irq = 59 + OMAP_INTC_START, }, +	{ .irq = -1 }, +}; + +static struct omap_hwmod am33xx_ehrpwm1_hwmod = { +	.name		= "ehrpwm1", +	.class		= &am33xx_ehrpwm_hwmod_class, +	.clkdm_name	= "l4ls_clkdm", +	.mpu_irqs	= am33xx_ehrpwm1_irqs, +	.main_clk	= "l4ls_gclk", +}; + +/* epwmss2 */ +static struct omap_hwmod am33xx_epwmss2_hwmod = { +	.name		= "epwmss2", +	.class		= &am33xx_epwmss_hwmod_class, +	.clkdm_name	= "l4ls_clkdm", +	.main_clk	= "l4ls_gclk",  	.prcm		= {  		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET, +			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,  			.modulemode	= MODULEMODE_SWCTRL,  		},  	}, @@ -912,16 +948,39 @@ static struct omap_hwmod_irq_info am33xx_ecap2_irqs[] = {  static struct omap_hwmod am33xx_ecap2_hwmod = {  	.name		= "ecap2", +	.class		= &am33xx_ecap_hwmod_class, +	.clkdm_name	= "l4ls_clkdm",  	.mpu_irqs	= am33xx_ecap2_irqs, -	.class		= &am33xx_epwmss_hwmod_class, +	.main_clk	= "l4ls_gclk", +}; + +/* eqep2 */ +static struct omap_hwmod_irq_info am33xx_eqep2_irqs[] = { +	{ .irq = 89 + OMAP_INTC_START, }, +	{ .irq = -1 }, +}; + +static struct omap_hwmod am33xx_eqep2_hwmod = { +	.name		= "eqep2", +	.class		= &am33xx_eqep_hwmod_class,  	.clkdm_name	= "l4ls_clkdm", +	.mpu_irqs	= am33xx_eqep2_irqs, +	.main_clk	= "l4ls_gclk", +}; + +/* ehrpwm2 */ +static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = { +	{ .name = "int", .irq = 39 + OMAP_INTC_START, }, +	{ .name = "tzint", .irq = 60 + OMAP_INTC_START, }, +	{ .irq = -1 }, +}; + +static struct omap_hwmod am33xx_ehrpwm2_hwmod = { +	.name		= "ehrpwm2", +	.class		= &am33xx_ehrpwm_hwmod_class, +	.clkdm_name	= "l4ls_clkdm", +	.mpu_irqs	= am33xx_ehrpwm2_irqs,  	.main_clk	= "l4ls_gclk", -	.prcm		= { -		.omap4	= { -			.clkctrl_offs	= AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET, -			.modulemode	= MODULEMODE_SWCTRL, -		}, -	},  };  /* @@ -1824,6 +1883,7 @@ static struct omap_hwmod am33xx_tptc0_hwmod = {  	.class		= &am33xx_tptc_hwmod_class,  	.clkdm_name	= "l3_clkdm",  	.mpu_irqs	= am33xx_tptc0_irqs, +	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,  	.main_clk	= "l3_gclk",  	.prcm		= {  		.omap4	= { @@ -2496,7 +2556,6 @@ static struct omap_hwmod_addr_space am33xx_cpgmac0_addr_space[] = {  	{  		.pa_start	= 0x4a100000,  		.pa_end		= 0x4a100000 + SZ_2K - 1, -		
.flags		= ADDR_TYPE_RT,  	},  	/* cpsw wr */  	{ @@ -2547,162 +2606,202 @@ static struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = { +static struct omap_hwmod_addr_space am33xx_epwmss0_addr_space[] = {  	{  		.pa_start	= 0x48300000,  		.pa_end		= 0x48300000 + SZ_16 - 1,  		.flags		= ADDR_TYPE_RT  	}, -	{ -		.pa_start	= 0x48300200, -		.pa_end		= 0x48300200 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT -	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm0 = { +static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {  	.master		= &am33xx_l4_ls_hwmod, -	.slave		= &am33xx_ehrpwm0_hwmod, +	.slave		= &am33xx_epwmss0_hwmod,  	.clk		= "l4ls_gclk", -	.addr		= am33xx_ehrpwm0_addr_space, +	.addr		= am33xx_epwmss0_addr_space,  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = { -	{ -		.pa_start	= 0x48302000, -		.pa_end		= 0x48302000 + SZ_16 - 1, -		.flags		= ADDR_TYPE_RT -	}, +static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {  	{ -		.pa_start	= 0x48302200, -		.pa_end		= 0x48302200 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT +		.pa_start	= 0x48300100, +		.pa_end		= 0x48300100 + SZ_128 - 1,  	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm1 = { -	.master		= &am33xx_l4_ls_hwmod, -	.slave		= &am33xx_ehrpwm1_hwmod, +static struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = { +	.master		= &am33xx_epwmss0_hwmod, +	.slave		= &am33xx_ecap0_hwmod,  	.clk		= "l4ls_gclk", -	.addr		= am33xx_ehrpwm1_addr_space, +	.addr		= am33xx_ecap0_addr_space,  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = { +static struct omap_hwmod_addr_space am33xx_eqep0_addr_space[] = {  	{ -		.pa_start	= 0x48304000, -		.pa_end		= 0x48304000 + SZ_16 - 1, -		.flags		= ADDR_TYPE_RT -	}, -	{ -		.pa_start	= 0x48304200, -		.pa_end		= 0x48304200 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT +		.pa_start	= 0x48300180, +		.pa_end		= 0x48300180 + SZ_128 - 1,  	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm2 = { -	.master		= &am33xx_l4_ls_hwmod, -	.slave		= &am33xx_ehrpwm2_hwmod, +static struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = { +	.master		= &am33xx_epwmss0_hwmod, +	.slave		= &am33xx_eqep0_hwmod,  	.clk		= "l4ls_gclk", -	.addr		= am33xx_ehrpwm2_addr_space, +	.addr		= am33xx_eqep0_addr_space,  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = { -	{ -		.pa_start	= 0x48300000, -		.pa_end		= 0x48300000 + SZ_16 - 1, -		.flags		= ADDR_TYPE_RT -	}, +static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {  	{ -		.pa_start	= 0x48300100, -		.pa_end		= 0x48300100 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT +		.pa_start	= 0x48300200, +		.pa_end		= 0x48300200 + SZ_128 - 1,  	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap0 = { -	.master		= &am33xx_l4_ls_hwmod, -	.slave		= &am33xx_ecap0_hwmod, +static struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = { +	.master		= 
&am33xx_epwmss0_hwmod, +	.slave		= &am33xx_ehrpwm0_hwmod,  	.clk		= "l4ls_gclk", -	.addr		= am33xx_ecap0_addr_space, +	.addr		= am33xx_ehrpwm0_addr_space,  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = { + +static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {  	{  		.pa_start	= 0x48302000,  		.pa_end		= 0x48302000 + SZ_16 - 1,  		.flags		= ADDR_TYPE_RT  	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = { +	.master		= &am33xx_l4_ls_hwmod, +	.slave		= &am33xx_epwmss1_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_epwmss1_addr_space, +	.user		= OCP_USER_MPU, +}; + +static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {  	{  		.pa_start	= 0x48302100, -		.pa_end		= 0x48302100 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT +		.pa_end		= 0x48302100 + SZ_128 - 1,  	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap1 = { -	.master		= &am33xx_l4_ls_hwmod, +static struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = { +	.master		= &am33xx_epwmss1_hwmod,  	.slave		= &am33xx_ecap1_hwmod,  	.clk		= "l4ls_gclk",  	.addr		= am33xx_ecap1_addr_space,  	.user		= OCP_USER_MPU,  }; -/* - * Splitting the resources to handle access of PWMSS config space - * and module specific part independently - */ -static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = { +static struct omap_hwmod_addr_space am33xx_eqep1_addr_space[] = { +	{ +		.pa_start	= 0x48302180, +		.pa_end		= 0x48302180 + SZ_128 - 1, +	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = { +	.master		= &am33xx_epwmss1_hwmod, +	.slave		= &am33xx_eqep1_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_eqep1_addr_space, +	.user		= OCP_USER_MPU, +}; + +static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = { +	{ +		.pa_start	= 0x48302200, +		.pa_end		= 0x48302200 + SZ_128 - 1, +	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = { +	.master		= &am33xx_epwmss1_hwmod, +	.slave		= &am33xx_ehrpwm1_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_ehrpwm1_addr_space, +	.user		= OCP_USER_MPU, +}; + +static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {  	{  		.pa_start	= 0x48304000,  		.pa_end		= 0x48304000 + SZ_16 - 1,  		.flags		= ADDR_TYPE_RT  	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = { +	.master		= &am33xx_l4_ls_hwmod, +	.slave		= &am33xx_epwmss2_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_epwmss2_addr_space, +	.user		= OCP_USER_MPU, +}; + +static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {  	{  		.pa_start	= 0x48304100, -		.pa_end		= 0x48304100 + SZ_256 - 1, -		.flags		= ADDR_TYPE_RT +		.pa_end		= 0x48304100 + SZ_128 - 1,  	},  	{ }  }; -static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap2 = { -	.master		= &am33xx_l4_ls_hwmod, +static struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = { +	.master		= &am33xx_epwmss2_hwmod,  	.slave		= &am33xx_ecap2_hwmod,  	.clk		= "l4ls_gclk",  	.addr		= am33xx_ecap2_addr_space,  	.user		= OCP_USER_MPU,  }; +static struct omap_hwmod_addr_space am33xx_eqep2_addr_space[] = { +	{ +		.pa_start	= 0x48304180, +		.pa_end		= 0x48304180 + SZ_128 - 1, +	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = { +	.master		= &am33xx_epwmss2_hwmod, +	.slave		= &am33xx_eqep2_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_eqep2_addr_space, +	.user		= OCP_USER_MPU, +}; + +static struct 
omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = { +	{ +		.pa_start	= 0x48304200, +		.pa_end		= 0x48304200 + SZ_128 - 1, +	}, +	{ } +}; + +static struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = { +	.master		= &am33xx_epwmss2_hwmod, +	.slave		= &am33xx_ehrpwm2_hwmod, +	.clk		= "l4ls_gclk", +	.addr		= am33xx_ehrpwm2_addr_space, +	.user		= OCP_USER_MPU, +}; +  /* l3s cfg -> gpmc */  static struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {  	{ @@ -3328,6 +3427,13 @@ static struct omap_hwmod_ocp_if am33xx_l3_s__usbss = {  	.flags		= OCPIF_SWSUP_IDLE,  }; +/* l3 main -> ocmc */ +static struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = { +	.master		= &am33xx_l3_main_hwmod, +	.slave		= &am33xx_ocmcram_hwmod, +	.user		= OCP_USER_MPU | OCP_USER_SDMA, +}; +  static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {  	&am33xx_l4_fw__emif_fw,  	&am33xx_l3_main__emif, @@ -3385,12 +3491,18 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {  	&am33xx_l4_ls__uart6,  	&am33xx_l4_ls__spinlock,  	&am33xx_l4_ls__elm, -	&am33xx_l4_ls__ehrpwm0, -	&am33xx_l4_ls__ehrpwm1, -	&am33xx_l4_ls__ehrpwm2, -	&am33xx_l4_ls__ecap0, -	&am33xx_l4_ls__ecap1, -	&am33xx_l4_ls__ecap2, +	&am33xx_l4_ls__epwmss0, +	&am33xx_epwmss0__ecap0, +	&am33xx_epwmss0__eqep0, +	&am33xx_epwmss0__ehrpwm0, +	&am33xx_l4_ls__epwmss1, +	&am33xx_epwmss1__ecap1, +	&am33xx_epwmss1__eqep1, +	&am33xx_epwmss1__ehrpwm1, +	&am33xx_l4_ls__epwmss2, +	&am33xx_epwmss2__ecap2, +	&am33xx_epwmss2__eqep2, +	&am33xx_epwmss2__ehrpwm2,  	&am33xx_l3_s__gpmc,  	&am33xx_l3_main__lcdc,  	&am33xx_l4_ls__mcspi0, @@ -3398,6 +3510,7 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {  	&am33xx_l3_main__tptc0,  	&am33xx_l3_main__tptc1,  	&am33xx_l3_main__tptc2, +	&am33xx_l3_main__ocmc,  	&am33xx_l3_s__usbss,  	&am33xx_l4_hs__cpgmac0,  	&am33xx_cpgmac0__mdio, diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 8bb2628df34..ac7e03ec952 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -3493,7 +3493,12 @@ static struct omap_hwmod am35xx_emac_hwmod = {  	.name		= "davinci_emac",  	.mpu_irqs	= am35xx_emac_mpu_irqs,  	.class		= &am35xx_emac_class, -	.flags		= HWMOD_NO_IDLEST, +	/* +	 * According to Mark Greer, the MPU will not return from WFI +	 * when the EMAC signals an interrupt. 
+	 * http://www.spinics.net/lists/arm-kernel/msg174734.html +	 */ +	.flags		= (HWMOD_NO_IDLEST | HWMOD_BLOCK_WFI),  };  /* l3_core -> davinci emac interface */ diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index f9084949b1e..1e2993883b4 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -617,7 +617,7 @@ static struct omap_hwmod omap44xx_dmic_hwmod = {  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_dmic_irqs,  	.sdma_reqs	= omap44xx_dmic_sdma_reqs, -	.main_clk	= "dmic_fck", +	.main_clk	= "func_dmic_abe_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET, @@ -1162,7 +1162,7 @@ static struct omap_hwmod omap44xx_gpio1_hwmod = {  	.class		= &omap44xx_gpio_hwmod_class,  	.clkdm_name	= "l4_wkup_clkdm",  	.mpu_irqs	= omap44xx_gpio1_irqs, -	.main_clk	= "gpio1_ick", +	.main_clk	= "l4_wkup_clk_mux_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET, @@ -1191,7 +1191,7 @@ static struct omap_hwmod omap44xx_gpio2_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,  	.mpu_irqs	= omap44xx_gpio2_irqs, -	.main_clk	= "gpio2_ick", +	.main_clk	= "l4_div_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET, @@ -1220,7 +1220,7 @@ static struct omap_hwmod omap44xx_gpio3_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,  	.mpu_irqs	= omap44xx_gpio3_irqs, -	.main_clk	= "gpio3_ick", +	.main_clk	= "l4_div_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET, @@ -1249,7 +1249,7 @@ static struct omap_hwmod omap44xx_gpio4_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,  	.mpu_irqs	= omap44xx_gpio4_irqs, -	.main_clk	= "gpio4_ick", +	.main_clk	= "l4_div_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET, @@ -1278,7 +1278,7 @@ static struct omap_hwmod omap44xx_gpio5_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,  	.mpu_irqs	= omap44xx_gpio5_irqs, -	.main_clk	= "gpio5_ick", +	.main_clk	= "l4_div_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET, @@ -1307,7 +1307,7 @@ static struct omap_hwmod omap44xx_gpio6_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,  	.mpu_irqs	= omap44xx_gpio6_irqs, -	.main_clk	= "gpio6_ick", +	.main_clk	= "l4_div_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET, @@ -1406,7 +1406,7 @@ static struct omap_hwmod omap44xx_gpu_hwmod = {  	.class		= &omap44xx_gpu_hwmod_class,  	.clkdm_name	= "l3_gfx_clkdm",  	.mpu_irqs	= omap44xx_gpu_irqs, -	.main_clk	= "gpu_fck", +	.main_clk	= "sgx_clk_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET, @@ -1447,7 +1447,7 @@ static struct omap_hwmod omap44xx_hdq1w_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_INIT_NO_RESET, /* XXX temporary */  	.mpu_irqs	= omap44xx_hdq1w_irqs, -	.main_clk	= "hdq1w_fck", +	.main_clk	= "func_12m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET, @@ -1551,7 +1551,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {  	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_i2c1_irqs,  	.sdma_reqs	= omap44xx_i2c1_sdma_reqs, -	.main_clk	= "i2c1_fck", +	.main_clk	= "func_96m_fclk",  	.prcm = { 
 		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET, @@ -1581,7 +1581,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {  	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_i2c2_irqs,  	.sdma_reqs	= omap44xx_i2c2_sdma_reqs, -	.main_clk	= "i2c2_fck", +	.main_clk	= "func_96m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET, @@ -1611,7 +1611,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {  	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_i2c3_irqs,  	.sdma_reqs	= omap44xx_i2c3_sdma_reqs, -	.main_clk	= "i2c3_fck", +	.main_clk	= "func_96m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET, @@ -1641,7 +1641,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {  	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_i2c4_irqs,  	.sdma_reqs	= omap44xx_i2c4_sdma_reqs, -	.main_clk	= "i2c4_fck", +	.main_clk	= "func_96m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET, @@ -1744,7 +1744,7 @@ static struct omap_hwmod omap44xx_iss_hwmod = {  	.clkdm_name	= "iss_clkdm",  	.mpu_irqs	= omap44xx_iss_irqs,  	.sdma_reqs	= omap44xx_iss_sdma_reqs, -	.main_clk	= "iss_fck", +	.main_clk	= "ducati_clk_mux_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET, @@ -1786,7 +1786,7 @@ static struct omap_hwmod omap44xx_iva_hwmod = {  	.mpu_irqs	= omap44xx_iva_irqs,  	.rst_lines	= omap44xx_iva_resets,  	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_resets), -	.main_clk	= "iva_fck", +	.main_clk	= "dpll_iva_m5x2_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET, @@ -1830,7 +1830,7 @@ static struct omap_hwmod omap44xx_kbd_hwmod = {  	.class		= &omap44xx_kbd_hwmod_class,  	.clkdm_name	= "l4_wkup_clkdm",  	.mpu_irqs	= omap44xx_kbd_irqs, -	.main_clk	= "kbd_fck", +	.main_clk	= "sys_32k_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET, @@ -1921,7 +1921,7 @@ static struct omap_hwmod omap44xx_mcasp_hwmod = {  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_mcasp_irqs,  	.sdma_reqs	= omap44xx_mcasp_sdma_reqs, -	.main_clk	= "mcasp_fck", +	.main_clk	= "func_mcasp_abe_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET, @@ -1973,7 +1973,7 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = {  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_mcbsp1_irqs,  	.sdma_reqs	= omap44xx_mcbsp1_sdma_reqs, -	.main_clk	= "mcbsp1_fck", +	.main_clk	= "func_mcbsp1_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET, @@ -2008,7 +2008,7 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = {  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_mcbsp2_irqs,  	.sdma_reqs	= omap44xx_mcbsp2_sdma_reqs, -	.main_clk	= "mcbsp2_fck", +	.main_clk	= "func_mcbsp2_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET, @@ -2043,7 +2043,7 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = {  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_mcbsp3_irqs,  	.sdma_reqs	= omap44xx_mcbsp3_sdma_reqs, -	.main_clk	= "mcbsp3_fck", +	.main_clk	= "func_mcbsp3_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET, @@ -2078,7 +2078,7 @@ static struct omap_hwmod omap44xx_mcbsp4_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mcbsp4_irqs,  	.sdma_reqs	= omap44xx_mcbsp4_sdma_reqs, -	.main_clk	= 
"mcbsp4_fck", +	.main_clk	= "per_mcbsp4_gfclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET, @@ -2141,7 +2141,7 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {  	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,  	.mpu_irqs	= omap44xx_mcpdm_irqs,  	.sdma_reqs	= omap44xx_mcpdm_sdma_reqs, -	.main_clk	= "mcpdm_fck", +	.main_clk	= "pad_clks_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET, @@ -2202,7 +2202,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mcspi1_irqs,  	.sdma_reqs	= omap44xx_mcspi1_sdma_reqs, -	.main_clk	= "mcspi1_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET, @@ -2238,7 +2238,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mcspi2_irqs,  	.sdma_reqs	= omap44xx_mcspi2_sdma_reqs, -	.main_clk	= "mcspi2_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET, @@ -2274,7 +2274,7 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mcspi3_irqs,  	.sdma_reqs	= omap44xx_mcspi3_sdma_reqs, -	.main_clk	= "mcspi3_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET, @@ -2308,7 +2308,7 @@ static struct omap_hwmod omap44xx_mcspi4_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mcspi4_irqs,  	.sdma_reqs	= omap44xx_mcspi4_sdma_reqs, -	.main_clk	= "mcspi4_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET, @@ -2364,7 +2364,7 @@ static struct omap_hwmod omap44xx_mmc1_hwmod = {  	.clkdm_name	= "l3_init_clkdm",  	.mpu_irqs	= omap44xx_mmc1_irqs,  	.sdma_reqs	= omap44xx_mmc1_sdma_reqs, -	.main_clk	= "mmc1_fck", +	.main_clk	= "hsmmc1_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET, @@ -2393,7 +2393,7 @@ static struct omap_hwmod omap44xx_mmc2_hwmod = {  	.clkdm_name	= "l3_init_clkdm",  	.mpu_irqs	= omap44xx_mmc2_irqs,  	.sdma_reqs	= omap44xx_mmc2_sdma_reqs, -	.main_clk	= "mmc2_fck", +	.main_clk	= "hsmmc2_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET, @@ -2421,7 +2421,7 @@ static struct omap_hwmod omap44xx_mmc3_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mmc3_irqs,  	.sdma_reqs	= omap44xx_mmc3_sdma_reqs, -	.main_clk	= "mmc3_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET, @@ -2449,7 +2449,7 @@ static struct omap_hwmod omap44xx_mmc4_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mmc4_irqs,  	.sdma_reqs	= omap44xx_mmc4_sdma_reqs, -	.main_clk	= "mmc4_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET, @@ -2477,7 +2477,7 @@ static struct omap_hwmod omap44xx_mmc5_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_mmc5_irqs,  	.sdma_reqs	= omap44xx_mmc5_sdma_reqs, -	.main_clk	= "mmc5_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET, @@ -2726,7 +2726,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {  	.name		= "ocp2scp_usb_phy",  	.class		= &omap44xx_ocp2scp_hwmod_class,  	.clkdm_name	= "l3_init_clkdm", -	
.main_clk	= "ocp2scp_usb_phy_phy_48m", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, @@ -3163,7 +3163,7 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {  	.clkdm_name	= "l4_wkup_clkdm",  	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_timer1_irqs, -	.main_clk	= "timer1_fck", +	.main_clk	= "dmt1_clk_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET, @@ -3186,7 +3186,7 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_timer2_irqs, -	.main_clk	= "timer2_fck", +	.main_clk	= "cm2_dm2_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET, @@ -3207,7 +3207,7 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_timer3_irqs, -	.main_clk	= "timer3_fck", +	.main_clk	= "cm2_dm3_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET, @@ -3228,7 +3228,7 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_timer4_irqs, -	.main_clk	= "timer4_fck", +	.main_clk	= "cm2_dm4_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET, @@ -3249,7 +3249,7 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_timer5_irqs, -	.main_clk	= "timer5_fck", +	.main_clk	= "timer5_sync_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET, @@ -3271,8 +3271,7 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_timer6_irqs, - -	.main_clk	= "timer6_fck", +	.main_clk	= "timer6_sync_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET, @@ -3294,7 +3293,7 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_timer7_irqs, -	.main_clk	= "timer7_fck", +	.main_clk	= "timer7_sync_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET, @@ -3316,7 +3315,7 @@ static struct omap_hwmod omap44xx_timer8_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_timer8_irqs, -	.main_clk	= "timer8_fck", +	.main_clk	= "timer8_sync_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET, @@ -3338,7 +3337,7 @@ static struct omap_hwmod omap44xx_timer9_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_timer9_irqs, -	.main_clk	= "timer9_fck", +	.main_clk	= "cm2_dm9_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET, @@ -3361,7 +3360,7 @@ static struct omap_hwmod omap44xx_timer10_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,  	.mpu_irqs	= omap44xx_timer10_irqs, -	.main_clk	= "timer10_fck", +	.main_clk	= "cm2_dm10_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET, @@ -3383,7 +3382,7 @@ static struct omap_hwmod omap44xx_timer11_hwmod = {  	.class		= &omap44xx_timer_hwmod_class,  	.clkdm_name	= "l4_per_clkdm",  
	.mpu_irqs	= omap44xx_timer11_irqs, -	.main_clk	= "timer11_fck", +	.main_clk	= "cm2_dm11_mux",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET, @@ -3434,7 +3433,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_uart1_irqs,  	.sdma_reqs	= omap44xx_uart1_sdma_reqs, -	.main_clk	= "uart1_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET, @@ -3462,7 +3461,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_uart2_irqs,  	.sdma_reqs	= omap44xx_uart2_sdma_reqs, -	.main_clk	= "uart2_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET, @@ -3491,7 +3490,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {  	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,  	.mpu_irqs	= omap44xx_uart3_irqs,  	.sdma_reqs	= omap44xx_uart3_sdma_reqs, -	.main_clk	= "uart3_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET, @@ -3519,7 +3518,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {  	.clkdm_name	= "l4_per_clkdm",  	.mpu_irqs	= omap44xx_uart4_irqs,  	.sdma_reqs	= omap44xx_uart4_sdma_reqs, -	.main_clk	= "uart4_fck", +	.main_clk	= "func_48m_fclk",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET, @@ -3798,7 +3797,7 @@ static struct omap_hwmod omap44xx_wd_timer2_hwmod = {  	.class		= &omap44xx_wd_timer_hwmod_class,  	.clkdm_name	= "l4_wkup_clkdm",  	.mpu_irqs	= omap44xx_wd_timer2_irqs, -	.main_clk	= "wd_timer2_fck", +	.main_clk	= "sys_32k_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET, @@ -3819,7 +3818,7 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {  	.class		= &omap44xx_wd_timer_hwmod_class,  	.clkdm_name	= "abe_clkdm",  	.mpu_irqs	= omap44xx_wd_timer3_irqs, -	.main_clk	= "wd_timer3_fck", +	.main_clk	= "sys_32k_ck",  	.prcm = {  		.omap4 = {  			.clkctrl_offs = OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index e2c291f52f9..6db89ae9238 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -83,10 +83,8 @@ static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user)  		strncmp(clkdm->name, "dpll", 4) == 0)  		return 0; -	seq_printf(s, "%s->%s (%d)", clkdm->name, -			clkdm->pwrdm.ptr->name, -			atomic_read(&clkdm->usecount)); -	seq_printf(s, "\n"); +	seq_printf(s, "%s->%s (%d)\n", clkdm->name, clkdm->pwrdm.ptr->name, +		   clkdm->usecount);  	return 0;  } diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 1ec429964b7..8d5e6e6b14a 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -32,8 +32,6 @@  #include "pm.h"  #include "twl-common.h" -static struct omap_device_pm_latency *pm_lats; -  /*   * omap_pm_suspend: points to a function that does the SoC-specific   * suspend work @@ -82,7 +80,7 @@ static int __init _init_omap_device(char *name)  		 __func__, name))  		return -ENODEV; -	pdev = omap_device_build(oh->name, 0, oh, NULL, 0, pm_lats, 0, false); +	pdev = omap_device_build(oh->name, 0, oh, NULL, 0);  	if (WARN(IS_ERR(pdev), "%s: could not build omap_device for %s\n",  		 __func__, name))  		return -ENODEV; @@ -108,80 +106,19 @@ static void __init omap2_init_processor_devices(void)  	}  } -/* Types of sleep_switch used in 
omap_set_pwrdm_state */ -#define FORCEWAKEUP_SWITCH	0 -#define LOWPOWERSTATE_SWITCH	1 -  int __init omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)  { +	/* XXX The usecount test is racy */  	if ((clkdm->flags & CLKDM_CAN_ENABLE_AUTO) &&  	    !(clkdm->flags & CLKDM_MISSING_IDLE_REPORTING))  		clkdm_allow_idle(clkdm);  	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && -		 atomic_read(&clkdm->usecount) == 0) +		 clkdm->usecount == 0)  		clkdm_sleep(clkdm);  	return 0;  }  /* - * This sets pwrdm state (other than mpu & core. Currently only ON & - * RET are supported. - */ -int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 pwrst) -{ -	u8 curr_pwrst, next_pwrst; -	int sleep_switch = -1, ret = 0, hwsup = 0; - -	if (!pwrdm || IS_ERR(pwrdm)) -		return -EINVAL; - -	while (!(pwrdm->pwrsts & (1 << pwrst))) { -		if (pwrst == PWRDM_POWER_OFF) -			return ret; -		pwrst--; -	} - -	next_pwrst = pwrdm_read_next_pwrst(pwrdm); -	if (next_pwrst == pwrst) -		return ret; - -	curr_pwrst = pwrdm_read_pwrst(pwrdm); -	if (curr_pwrst < PWRDM_POWER_ON) { -		if ((curr_pwrst > pwrst) && -			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) { -			sleep_switch = LOWPOWERSTATE_SWITCH; -		} else { -			hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]); -			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]); -			sleep_switch = FORCEWAKEUP_SWITCH; -		} -	} - -	ret = pwrdm_set_next_pwrst(pwrdm, pwrst); -	if (ret) -		pr_err("%s: unable to set power state of powerdomain: %s\n", -		       __func__, pwrdm->name); - -	switch (sleep_switch) { -	case FORCEWAKEUP_SWITCH: -		if (hwsup) -			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]); -		else -			clkdm_sleep(pwrdm->pwrdm_clkdms[0]); -		break; -	case LOWPOWERSTATE_SWITCH: -		pwrdm_set_lowpwrstchange(pwrdm); -		pwrdm_wait_transition(pwrdm); -		pwrdm_state_switch(pwrdm); -		break; -	} - -	return ret; -} - - - -/*   * This API is to be called during init to set the various voltage   * domains to the voltage as per the opp table. Typically we boot up   * at the nominal voltage. 
So this function finds out the rate of diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index c22503b17ab..7bdd22afce6 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -33,7 +33,6 @@ static inline int omap4_idle_init(void)  extern void *omap3_secure_ram_storage;  extern void omap3_pm_off_mode_enable(int);  extern void omap_sram_idle(void); -extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);  extern int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused);  extern int (*omap_pm_suspend)(void); diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index c333fa6dffa..b59d9390834 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -54,7 +54,6 @@  #include "powerdomain.h"  #include "clockdomain.h" -static void (*omap2_sram_idle)(void);  static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,  				  void __iomem *sdrc_power); @@ -90,11 +89,7 @@ static int omap2_enter_full_retention(void)  	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);  	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST); -	/* -	 * Set MPU powerdomain's next power state to RETENTION; -	 * preserve logic state during retention -	 */ -	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET); +	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);  	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);  	/* Workaround to kill USB */ @@ -137,15 +132,10 @@ no_sleep:  	/* Mask future PRCM-to-MPU interrupts */  	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET); -	return 0; -} - -static int omap2_i2c_active(void) -{ -	u32 l; +	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); +	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON); -	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); -	return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK); +	return 0;  }  static int sti_console_enabled; @@ -172,10 +162,7 @@ static int omap2_allow_mpu_retention(void)  static void omap2_enter_mpu_retention(void)  { -	/* Putting MPU into the WFI state while a transfer is active -	 * seems to cause the I2C block to timeout. Why? Good question. */ -	if (omap2_i2c_active()) -		return; +	const int zero = 0;  	/* The peripherals seem not to be able to wake up the MPU when  	 * it is in retention mode. 
*/ @@ -186,17 +173,17 @@ static void omap2_enter_mpu_retention(void)  		omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);  		/* Try to enter MPU retention */ -		omap2_prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) | -				  OMAP_LOGICRETSTATE_MASK, -				  MPU_MOD, OMAP2_PM_PWSTCTRL); +		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET); +  	} else {  		/* Block MPU retention */ - -		omap2_prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD, -						 OMAP2_PM_PWSTCTRL); +		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);  	} -	omap2_sram_idle(); +	/* WFI */ +	asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc"); + +	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);  }  static int omap2_can_sleep(void) @@ -251,25 +238,17 @@ static void __init prcm_setup_regs(void)  	for (i = 0; i < num_mem_banks; i++)  		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET); -	/* Set CORE powerdomain's next power state to RETENTION */ -	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET); +	pwrdm_set_logic_retst(core_pwrdm, PWRDM_POWER_RET); -	/* -	 * Set MPU powerdomain's next power state to RETENTION; -	 * preserve logic state during retention -	 */  	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET); -	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);  	/* Force-power down DSP, GFX powerdomains */  	pwrdm = clkdm_get_pwrdm(dsp_clkdm);  	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF); -	clkdm_sleep(dsp_clkdm);  	pwrdm = clkdm_get_pwrdm(gfx_clkdm);  	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF); -	clkdm_sleep(gfx_clkdm);  	/* Enable hardware-supervised idle for all clkdms */  	clkdm_for_each(omap_pm_clkdms_setup, NULL); @@ -356,11 +335,9 @@ int __init omap2_pm_init(void)  	/*  	 * We copy the assembler sleep/wakeup routines to SRAM.  	 * These routines need to be in SRAM as that's the only -	 * memory the MPU can see when it wakes up. +	 * memory the MPU can see when it wakes up after the entire +	 * chip enters idle.  	 
*/ -	omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend, -					 omap24xx_idle_loop_suspend_sz); -  	omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,  					    omap24xx_cpu_suspend_sz); diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c index eb78ae7a346..0ef4d6aa758 100644 --- a/arch/arm/mach-omap2/pmu.c +++ b/arch/arm/mach-omap2/pmu.c @@ -48,8 +48,7 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])  		}  	} -	omap_pmu_dev = omap_device_build_ss(dev_name, -1, oh, oh_num, NULL, 0, -					    NULL, 0, 0); +	omap_pmu_dev = omap_device_build_ss(dev_name, -1, oh, oh_num, NULL, 0);  	WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n",  	     dev_name); diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index dea62a9aad0..8e61d80bf6b 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -19,6 +19,7 @@  #include <linux/list.h>  #include <linux/errno.h>  #include <linux/string.h> +#include <linux/spinlock.h>  #include <trace/events/power.h>  #include "cm2xxx_3xxx.h" @@ -42,6 +43,16 @@ enum {  	PWRDM_STATE_PREV,  }; +/* + * Types of sleep_switch used internally in omap_set_pwrdm_state() + * and its associated static functions + * + * XXX Better documentation is needed here + */ +#define ALREADYACTIVE_SWITCH		0 +#define FORCEWAKEUP_SWITCH		1 +#define LOWPOWERSTATE_SWITCH		2 +#define ERROR_SWITCH			3  /* pwrdm_list contains all registered struct powerdomains */  static LIST_HEAD(pwrdm_list); @@ -101,6 +112,7 @@ static int _pwrdm_register(struct powerdomain *pwrdm)  	pwrdm->voltdm.ptr = voltdm;  	INIT_LIST_HEAD(&pwrdm->voltdm_node);  	voltdm_add_pwrdm(voltdm, pwrdm); +	spin_lock_init(&pwrdm->_lock);  	list_add(&pwrdm->node, &pwrdm_list); @@ -112,7 +124,7 @@ static int _pwrdm_register(struct powerdomain *pwrdm)  	for (i = 0; i < pwrdm->banks; i++)  		pwrdm->ret_mem_off_counter[i] = 0; -	pwrdm_wait_transition(pwrdm); +	arch_pwrdm->pwrdm_wait_transition(pwrdm);  	pwrdm->state = pwrdm_read_pwrst(pwrdm);  	pwrdm->state_counter[pwrdm->state] = 1; @@ -143,7 +155,7 @@ static void _update_logic_membank_counters(struct powerdomain *pwrdm)  static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)  { -	int prev, state, trace_state = 0; +	int prev, next, state, trace_state = 0;  	if (pwrdm == NULL)  		return -EINVAL; @@ -164,9 +176,10 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)  		 * If the power domain did not hit the desired state,  		 * generate a trace event with both the desired and hit states  		 */ -		if (state != prev) { +		next = pwrdm_read_next_pwrst(pwrdm); +		if (next != prev) {  			trace_state = (PWRDM_TRACE_STATES_FLAG | -				       ((state & OMAP_POWERSTATE_MASK) << 8) | +				       ((next & OMAP_POWERSTATE_MASK) << 8) |  				       ((prev & OMAP_POWERSTATE_MASK) << 0));  			trace_power_domain_target(pwrdm->name, trace_state,  						  smp_processor_id()); @@ -199,6 +212,80 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)  	return 0;  } +/** + * _pwrdm_save_clkdm_state_and_activate - prepare for power state change + * @pwrdm: struct powerdomain * to operate on + * @curr_pwrst: current power state of @pwrdm + * @pwrst: power state to switch to + * @hwsup: ptr to a bool to return whether the clkdm is hardware-supervised + * + * Determine whether the powerdomain needs to be turned on before + * attempting to switch power states.  Called by + * omap_set_pwrdm_state().  
NOTE that if the powerdomain contains + * multiple clockdomains, this code assumes that the first clockdomain + * supports software-supervised wakeup mode - potentially a problem. + * Returns the power state switch mode currently in use (see the + * "Types of sleep_switch" comment above). + */ +static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm, +					       u8 curr_pwrst, u8 pwrst, +					       bool *hwsup) +{ +	u8 sleep_switch; + +	if (curr_pwrst < 0) { +		WARN_ON(1); +		sleep_switch = ERROR_SWITCH; +	} else if (curr_pwrst < PWRDM_POWER_ON) { +		if (curr_pwrst > pwrst && +		    pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE && +		    arch_pwrdm->pwrdm_set_lowpwrstchange) { +			sleep_switch = LOWPOWERSTATE_SWITCH; +		} else { +			*hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]); +			clkdm_wakeup_nolock(pwrdm->pwrdm_clkdms[0]); +			sleep_switch = FORCEWAKEUP_SWITCH; +		} +	} else { +		sleep_switch = ALREADYACTIVE_SWITCH; +	} + +	return sleep_switch; +} + +/** + * _pwrdm_restore_clkdm_state - restore the clkdm hwsup state after pwrst change + * @pwrdm: struct powerdomain * to operate on + * @sleep_switch: return value from _pwrdm_save_clkdm_state_and_activate() + * @hwsup: should @pwrdm's first clockdomain be set to hardware-supervised mode? + * + * Restore the clockdomain state perturbed by + * _pwrdm_save_clkdm_state_and_activate(), and call the power state + * bookkeeping code.  Called by omap_set_pwrdm_state().  NOTE that if + * the powerdomain contains multiple clockdomains, this assumes that + * the first associated clockdomain supports either + * hardware-supervised idle control in the register, or + * software-supervised sleep.  No return value. + */ +static void _pwrdm_restore_clkdm_state(struct powerdomain *pwrdm, +				       u8 sleep_switch, bool hwsup) +{ +	switch (sleep_switch) { +	case FORCEWAKEUP_SWITCH: +		if (hwsup) +			clkdm_allow_idle_nolock(pwrdm->pwrdm_clkdms[0]); +		else +			clkdm_sleep_nolock(pwrdm->pwrdm_clkdms[0]); +		break; +	case LOWPOWERSTATE_SWITCH: +		if (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE && +		    arch_pwrdm->pwrdm_set_lowpwrstchange) +			arch_pwrdm->pwrdm_set_lowpwrstchange(pwrdm); +		pwrdm_state_switch_nolock(pwrdm); +		break; +	} +} +  /* Public functions */  /** @@ -275,6 +362,30 @@ int pwrdm_complete_init(void)  }  /** + * pwrdm_lock - acquire a Linux spinlock on a powerdomain + * @pwrdm: struct powerdomain * to lock + * + * Acquire the powerdomain spinlock on @pwrdm.  No return value. + */ +void pwrdm_lock(struct powerdomain *pwrdm) +	__acquires(&pwrdm->_lock) +{ +	spin_lock_irqsave(&pwrdm->_lock, pwrdm->_lock_flags); +} + +/** + * pwrdm_unlock - release a Linux spinlock on a powerdomain + * @pwrdm: struct powerdomain * to unlock + * + * Release the powerdomain spinlock on @pwrdm.  No return value. + */ +void pwrdm_unlock(struct powerdomain *pwrdm) +	__releases(&pwrdm->_lock) +{ +	spin_unlock_irqrestore(&pwrdm->_lock, pwrdm->_lock_flags); +} + +/**   * pwrdm_lookup - look up a powerdomain by name, return a pointer   * @name: name of powerdomain   * @@ -920,65 +1031,27 @@ bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm)  	return (pwrdm && pwrdm->flags & PWRDM_HAS_HDWR_SAR) ? 1 : 0;  } -/** - * pwrdm_set_lowpwrstchange - Request a low power state change - * @pwrdm: struct powerdomain * - * - * Allows a powerdomain to transtion to a lower power sleep state - * from an existing sleep state without waking up the powerdomain. 
- * Returns -EINVAL if the powerdomain pointer is null or if the - * powerdomain does not support LOWPOWERSTATECHANGE, or returns 0 - * upon success. - */ -int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) -{ -	int ret = -EINVAL; - -	if (!pwrdm) -		return -EINVAL; - -	if (!(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) -		return -EINVAL; - -	pr_debug("powerdomain: %s: setting LOWPOWERSTATECHANGE bit\n", -		 pwrdm->name); - -	if (arch_pwrdm && arch_pwrdm->pwrdm_set_lowpwrstchange) -		ret = arch_pwrdm->pwrdm_set_lowpwrstchange(pwrdm); - -	return ret; -} - -/** - * pwrdm_wait_transition - wait for powerdomain power transition to finish - * @pwrdm: struct powerdomain * to wait for - * - * If the powerdomain @pwrdm is in the process of a state transition, - * spin until it completes the power transition, or until an iteration - * bailout value is reached. Returns -EINVAL if the powerdomain - * pointer is null, -EAGAIN if the bailout value was reached, or - * returns 0 upon success. - */ -int pwrdm_wait_transition(struct powerdomain *pwrdm) +int pwrdm_state_switch_nolock(struct powerdomain *pwrdm)  { -	int ret = -EINVAL; +	int ret; -	if (!pwrdm) +	if (!pwrdm || !arch_pwrdm)  		return -EINVAL; -	if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition) -		ret = arch_pwrdm->pwrdm_wait_transition(pwrdm); +	ret = arch_pwrdm->pwrdm_wait_transition(pwrdm); +	if (!ret) +		ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);  	return ret;  } -int pwrdm_state_switch(struct powerdomain *pwrdm) +int __deprecated pwrdm_state_switch(struct powerdomain *pwrdm)  {  	int ret; -	ret = pwrdm_wait_transition(pwrdm); -	if (!ret) -		ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); +	pwrdm_lock(pwrdm); +	ret = pwrdm_state_switch_nolock(pwrdm); +	pwrdm_unlock(pwrdm);  	return ret;  } @@ -1004,6 +1077,61 @@ int pwrdm_post_transition(struct powerdomain *pwrdm)  }  /** + * omap_set_pwrdm_state - change a powerdomain's current power state + * @pwrdm: struct powerdomain * to change the power state of + * @pwrst: power state to change to + * + * Change the current hardware power state of the powerdomain + * represented by @pwrdm to the power state represented by @pwrst. + * Returns -EINVAL if @pwrdm is null or invalid or if the + * powerdomain's current power state could not be read, or returns 0 + * upon success or if @pwrdm does not support @pwrst or any + * lower-power state.  XXX Should not return 0 if the @pwrdm does not + * support @pwrst or any lower-power state: this should be an error. 
+ */ +int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst) +{ +	u8 curr_pwrst, next_pwrst, sleep_switch; +	int ret = 0; +	bool hwsup = false; + +	if (!pwrdm || IS_ERR(pwrdm)) +		return -EINVAL; + +	while (!(pwrdm->pwrsts & (1 << pwrst))) { +		if (pwrst == PWRDM_POWER_OFF) +			return ret; +		pwrst--; +	} + +	pwrdm_lock(pwrdm); + +	curr_pwrst = pwrdm_read_pwrst(pwrdm); +	next_pwrst = pwrdm_read_next_pwrst(pwrdm); +	if (curr_pwrst == pwrst && next_pwrst == pwrst) +		goto osps_out; + +	sleep_switch = _pwrdm_save_clkdm_state_and_activate(pwrdm, curr_pwrst, +							    pwrst, &hwsup); +	if (sleep_switch == ERROR_SWITCH) { +		ret = -EINVAL; +		goto osps_out; +	} + +	ret = pwrdm_set_next_pwrst(pwrdm, pwrst); +	if (ret) +		pr_err("%s: unable to set power state of powerdomain: %s\n", +		       __func__, pwrdm->name); + +	_pwrdm_restore_clkdm_state(pwrdm, sleep_switch, hwsup); + +osps_out: +	pwrdm_unlock(pwrdm); + +	return ret; +} + +/**   * pwrdm_get_context_loss_count - get powerdomain's context loss count   * @pwrdm: struct powerdomain * to wait for   * diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h index 5277d56eb37..140c36074fe 100644 --- a/arch/arm/mach-omap2/powerdomain.h +++ b/arch/arm/mach-omap2/powerdomain.h @@ -19,8 +19,7 @@  #include <linux/types.h>  #include <linux/list.h> - -#include <linux/atomic.h> +#include <linux/spinlock.h>  #include "voltage.h" @@ -44,18 +43,20 @@  #define PWRSTS_OFF_RET_ON	(PWRSTS_OFF_RET | PWRSTS_ON) -/* Powerdomain flags */ -#define PWRDM_HAS_HDWR_SAR	(1 << 0) /* hardware save-and-restore support */ -#define PWRDM_HAS_MPU_QUIRK	(1 << 1) /* MPU pwr domain has MEM bank 0 bits -					  * in MEM bank 1 position. This is -					  * true for OMAP3430 -					  */ -#define PWRDM_HAS_LOWPOWERSTATECHANGE	(1 << 2) /* -						  * support to transition from a -						  * sleep state to a lower sleep -						  * state without waking up the -						  * powerdomain -						  */ +/* + * Powerdomain flags (struct powerdomain.flags) + * + * PWRDM_HAS_HDWR_SAR - powerdomain has hardware save-and-restore support + * + * PWRDM_HAS_MPU_QUIRK - MPU pwr domain has MEM bank 0 bits in MEM + * bank 1 position. This is true for OMAP3430 + * + * PWRDM_HAS_LOWPOWERSTATECHANGE - can transition from a sleep state + * to a lower sleep state without waking up the powerdomain + */ +#define PWRDM_HAS_HDWR_SAR		BIT(0) +#define PWRDM_HAS_MPU_QUIRK		BIT(1) +#define PWRDM_HAS_LOWPOWERSTATECHANGE	BIT(2)  /*   * Number of memory banks that are power-controllable.	On OMAP4430, the @@ -103,6 +104,8 @@ struct powerdomain;   * @state_counter:   * @timer:   * @state_timer: + * @_lock: spinlock used to serialize powerdomain and some clockdomain ops + * @_lock_flags: stored flags when @_lock is taken   *   * @prcm_partition possible values are defined in mach-omap2/prcm44xx.h.   
*/ @@ -127,7 +130,8 @@ struct powerdomain {  	unsigned state_counter[PWRDM_MAX_PWRSTS];  	unsigned ret_logic_off_counter;  	unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS]; - +	spinlock_t _lock; +	unsigned long _lock_flags;  	const u8 pwrstctrl_offs;  	const u8 pwrstst_offs;  	const u32 logicretstate_mask; @@ -162,6 +166,16 @@ struct powerdomain {   * @pwrdm_disable_hdwr_sar: Disable Hardware Save-Restore feature for a pd   * @pwrdm_set_lowpwrstchange: Enable pd transitions from a shallow to deep sleep   * @pwrdm_wait_transition: Wait for a pd state transition to complete + * + * Regarding @pwrdm_set_lowpwrstchange: On the OMAP2 and 3-family + * chips, a powerdomain's power state is not allowed to directly + * transition from one low-power state (e.g., CSWR) to another + * low-power state (e.g., OFF) without first waking up the + * powerdomain.  This wastes energy.  So OMAP4 chips support the + * ability to transition a powerdomain power state directly from one + * low-power state to another.  The function pointed to by + * @pwrdm_set_lowpwrstchange is intended to configure the OMAP4 + * hardware powerdomain state machine to enable this feature.   */  struct pwrdm_ops {  	int	(*pwrdm_set_next_pwrst)(struct powerdomain *pwrdm, u8 pwrst); @@ -225,15 +239,15 @@ int pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm);  int pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm);  bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm); -int pwrdm_wait_transition(struct powerdomain *pwrdm); - +int pwrdm_state_switch_nolock(struct powerdomain *pwrdm);  int pwrdm_state_switch(struct powerdomain *pwrdm);  int pwrdm_pre_transition(struct powerdomain *pwrdm);  int pwrdm_post_transition(struct powerdomain *pwrdm); -int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);  int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);  bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm); +extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 state); +  extern void omap242x_powerdomains_init(void);  extern void omap243x_powerdomains_init(void);  extern void omap3xxx_powerdomains_init(void); @@ -253,5 +267,7 @@ extern u32 omap2_pwrdm_get_mem_bank_stst_mask(u8 bank);  extern struct powerdomain wkup_omap2_pwrdm;  extern struct powerdomain gfx_omap2_pwrdm; +extern void pwrdm_lock(struct powerdomain *pwrdm); +extern void pwrdm_unlock(struct powerdomain *pwrdm);  #endif diff --git a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c index d3a5399091a..7b946f1005b 100644 --- a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c +++ b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c @@ -54,12 +54,12 @@ struct powerdomain gfx_omap2_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  struct powerdomain wkup_omap2_pwrdm = {  	.name		= "wkup_pwrdm",  	.prcm_offs	= WKUP_MOD,  	.pwrsts		= PWRSTS_ON, -	.voltdm         = { .name = "wakeup" }, +	.voltdm		= { .name = "wakeup" },  }; diff --git a/arch/arm/mach-omap2/powerdomains2xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_data.c index ba520d4f7c7..578eef86fcf 100644 --- a/arch/arm/mach-omap2/powerdomains2xxx_data.c +++ b/arch/arm/mach-omap2/powerdomains2xxx_data.c @@ -38,7 +38,7 @@ static struct powerdomain dsp_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain mpu_24xx_pwrdm = { @@ -53,13 +53,14 @@ 
static struct powerdomain mpu_24xx_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain core_24xx_pwrdm = {  	.name		  = "core_pwrdm",  	.prcm_offs	  = CORE_MOD,  	.pwrsts		  = PWRSTS_OFF_RET_ON, +	.pwrsts_logic_ret = PWRSTS_RET,  	.banks		  = 3,  	.pwrsts_mem_ret	  = {  		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */ @@ -71,7 +72,7 @@ static struct powerdomain core_24xx_pwrdm = {  		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */  		[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  }; @@ -93,7 +94,7 @@ static struct powerdomain mdm_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  /* diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c index 8b23d234fb5..f0e14e9efe5 100644 --- a/arch/arm/mach-omap2/powerdomains3xxx_data.c +++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c @@ -50,7 +50,7 @@ static struct powerdomain iva2_pwrdm = {  		[2] = PWRSTS_OFF_ON,  		[3] = PWRSTS_ON,  	}, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain mpu_3xxx_pwrdm = { @@ -66,7 +66,7 @@ static struct powerdomain mpu_3xxx_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_OFF_ON,  	}, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain mpu_am35x_pwrdm = { @@ -82,7 +82,7 @@ static struct powerdomain mpu_am35x_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  	}, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  /* @@ -109,7 +109,7 @@ static struct powerdomain core_3xxx_pre_es3_1_pwrdm = {  		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */  		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain core_3xxx_es3_1_pwrdm = { @@ -131,7 +131,7 @@ static struct powerdomain core_3xxx_es3_1_pwrdm = {  		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */  		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain core_am35x_pwrdm = { @@ -148,7 +148,7 @@ static struct powerdomain core_am35x_pwrdm = {  		[0] = PWRSTS_ON, /* MEM1ONSTATE */  		[1] = PWRSTS_ON, /* MEM2ONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain dss_pwrdm = { @@ -163,7 +163,7 @@ static struct powerdomain dss_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain dss_am35x_pwrdm = { @@ -178,7 +178,7 @@ static struct powerdomain dss_am35x_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  /* @@ -199,7 +199,7 @@ static struct powerdomain sgx_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain sgx_am35x_pwrdm = { @@ -214,7 +214,7 @@ static struct powerdomain sgx_am35x_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	
.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain cam_pwrdm = { @@ -229,7 +229,7 @@ static struct powerdomain cam_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain per_pwrdm = { @@ -244,7 +244,7 @@ static struct powerdomain per_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain per_am35x_pwrdm = { @@ -259,13 +259,13 @@ static struct powerdomain per_am35x_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain emu_pwrdm = {  	.name		= "emu_pwrdm",  	.prcm_offs	= OMAP3430_EMU_MOD, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain neon_pwrdm = { @@ -273,7 +273,7 @@ static struct powerdomain neon_pwrdm = {  	.prcm_offs	  = OMAP3430_NEON_MOD,  	.pwrsts		  = PWRSTS_OFF_RET_ON,  	.pwrsts_logic_ret = PWRSTS_RET, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain neon_am35x_pwrdm = { @@ -281,7 +281,7 @@ static struct powerdomain neon_am35x_pwrdm = {  	.prcm_offs	  = OMAP3430_NEON_MOD,  	.pwrsts		  = PWRSTS_ON,  	.pwrsts_logic_ret = PWRSTS_ON, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain usbhost_pwrdm = { @@ -303,37 +303,37 @@ static struct powerdomain usbhost_pwrdm = {  	.pwrsts_mem_on	  = {  		[0] = PWRSTS_ON,  /* MEMONSTATE */  	}, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain dpll1_pwrdm = {  	.name		= "dpll1_pwrdm",  	.prcm_offs	= MPU_MOD, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain dpll2_pwrdm = {  	.name		= "dpll2_pwrdm",  	.prcm_offs	= OMAP3430_IVA2_MOD, -	.voltdm           = { .name = "mpu_iva" }, +	.voltdm		  = { .name = "mpu_iva" },  };  static struct powerdomain dpll3_pwrdm = {  	.name		= "dpll3_pwrdm",  	.prcm_offs	= PLL_MOD, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain dpll4_pwrdm = {  	.name		= "dpll4_pwrdm",  	.prcm_offs	= PLL_MOD, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  static struct powerdomain dpll5_pwrdm = {  	.name		= "dpll5_pwrdm",  	.prcm_offs	= PLL_MOD, -	.voltdm           = { .name = "core" }, +	.voltdm		  = { .name = "core" },  };  /* As powerdomains are added or removed above, this list must also be changed */ diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c index a3e121f94a8..947f6adfed0 100644 --- a/arch/arm/mach-omap2/prm2xxx_3xxx.c +++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c @@ -210,6 +210,7 @@ int omap2_clkdm_read_wkdep(struct clockdomain *clkdm1,  					     PM_WKDEP, (1 << clkdm2->dep_bit));  } +/* XXX Caller must hold the clkdm's powerdomain lock */  int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm)  {  	struct clkdm_dep *cd; @@ -221,7 +222,7 @@ int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm)  		/* PRM accesses are slow, so minimize them */  		mask |= 1 << cd->clkdm->dep_bit; -		atomic_set(&cd->wkdep_usecount, 0); +		cd->wkdep_usecount = 0;  	}  	
omap2_prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs, diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c index 1ac73883f89..44c0d7216aa 100644 --- a/arch/arm/mach-omap2/prm33xx.c +++ b/arch/arm/mach-omap2/prm33xx.c @@ -110,11 +110,11 @@ int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs)   * -EINVAL upon an argument error, -EEXIST if the submodule was already out   * of reset, or -EBUSY if the submodule did not exit reset promptly.   */ -int am33xx_prm_deassert_hardreset(u8 shift, s16 inst, +int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,  		u16 rstctrl_offs, u16 rstst_offs)  {  	int c; -	u32 mask = 1 << shift; +	u32 mask = 1 << st_shift;  	/* Check the current status to avoid  de-asserting the line twice */  	if (am33xx_prm_is_hardreset_asserted(shift, inst, rstctrl_offs) == 0) @@ -122,11 +122,14 @@ int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,  	/* Clear the reset status by writing 1 to the status bit */  	am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs); +  	/* de-assert the reset control line */ +	mask = 1 << shift; +  	am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs); -	/* wait the status to be set */ -	omap_test_timeout(am33xx_prm_is_hardreset_asserted(shift, inst, +	/* wait the status to be set */ +	omap_test_timeout(am33xx_prm_is_hardreset_asserted(st_shift, inst,  							   rstst_offs),  			  MAX_MODULE_HARDRESET_WAIT, c); diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h index 3f25c563a82..9b9918dfb11 100644 --- a/arch/arm/mach-omap2/prm33xx.h +++ b/arch/arm/mach-omap2/prm33xx.h @@ -117,6 +117,7 @@  #define AM33XX_PM_CEFUSE_PWRSTST_OFFSET		0x0004  #define AM33XX_PM_CEFUSE_PWRSTST		AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004) +#ifndef __ASSEMBLER__  extern u32 am33xx_prm_read_reg(s16 inst, u16 idx);  extern void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx);  extern u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx); @@ -124,6 +125,7 @@ extern void am33xx_prm_global_warm_sw_reset(void);  extern int am33xx_prm_is_hardreset_asserted(u8 shift, s16 inst,  		u16 rstctrl_offs);  extern int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs); -extern int am33xx_prm_deassert_hardreset(u8 shift, s16 inst, +extern int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,  		u16 rstctrl_offs, u16 rstst_offs); +#endif /* ASSEMBLER */  #endif diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 04fdbc4c499..d01c373cbbe 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -316,8 +316,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,  	if (WARN_ON(!oh))  		return; -	pdev = omap_device_build(name, uart->num, oh, pdata, pdata_size, -				 NULL, 0, false); +	pdev = omap_device_build(name, uart->num, oh, pdata, pdata_size);  	if (IS_ERR(pdev)) {  		WARN(1, "Could not build omap_device for %s: %s.\n", name,  		     oh->name); diff --git a/arch/arm/mach-omap2/sleep24xx.S b/arch/arm/mach-omap2/sleep24xx.S index ce0ccd26efb..1d3cb25c962 100644 --- a/arch/arm/mach-omap2/sleep24xx.S +++ b/arch/arm/mach-omap2/sleep24xx.S @@ -37,25 +37,6 @@  	.text  /* - * Forces OMAP into idle state - * - * omap24xx_idle_loop_suspend() - This bit of code just executes the WFI - * for normal idles. - * - * Note: This code get's copied to internal SRAM at boot. When the OMAP - *	 wakes up it continues execution at the point it went to sleep. 
- */ -	.align	3 -ENTRY(omap24xx_idle_loop_suspend) -	stmfd	sp!, {r0, lr}		@ save registers on stack -	mov	r0, #0			@ clear for mcr setup -	mcr	p15, 0, r0, c7, c0, 4	@ wait for interrupt -	ldmfd	sp!, {r0, pc}		@ restore regs and return - -ENTRY(omap24xx_idle_loop_suspend_sz) -	.word	. - omap24xx_idle_loop_suspend - -/*   * omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing   * SDRC shutdown then ARM shutdown.  Upon wake MPU is back on so just restore   * SDRC. diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index f31d90774de..15e959111e2 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -387,6 +387,7 @@ IS_OMAP_TYPE(3430, 0x3430)  #define AM335X_CLASS		0x33500033  #define AM335X_REV_ES1_0	AM335X_CLASS +#define AM335X_REV_ES2_0	(AM335X_CLASS | (0x1 << 8))  #define OMAP443X_CLASS		0x44300044  #define OMAP4430_REV_ES1_0	(OMAP443X_CLASS | (0x10 << 8)) diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c index b9753fe2723..d7bc33f1534 100644 --- a/arch/arm/mach-omap2/sr_device.c +++ b/arch/arm/mach-omap2/sr_device.c @@ -152,8 +152,7 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)  	sr_data->enable_on_init = sr_enable_on_init; -	pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data), -				 NULL, 0, 0); +	pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data));  	if (IS_ERR(pdev))  		pr_warning("%s: Could not build omap_device for %s: %s.\n\n",  			__func__, name, oh->name); diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index b8ad6e632bb..63e5fb017fd 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -702,8 +702,7 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)  	pdata->timer_errata = omap_dm_timer_get_errata();  	pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count; -	pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata), -				 NULL, 0, 0); +	pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata));  	if (IS_ERR(pdev)) {  		pr_err("%s: Can't build omap_device for %s: %s.\n", diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index 2e44e8a2288..99f04deb4c7 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -42,14 +42,6 @@ static struct usbtll_omap_platform_data		usbtll_data;  static struct ehci_hcd_omap_platform_data	ehci_data;  static struct ohci_hcd_omap_platform_data	ohci_data; -static struct omap_device_pm_latency omap_uhhtll_latency[] = { -	  { -		.deactivate_func = omap_device_idle_hwmods, -		.activate_func	 = omap_device_enable_hwmods, -		.flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, -	  }, -}; -  /* MUX settings for EHCI pins */  /*   * setup_ehci_io_mux - initialize IO pad mux for USBHOST @@ -530,9 +522,7 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)  	}  	pdev = omap_device_build(OMAP_USBTLL_DEVICE, bus_id, tll_hwm, -				&usbtll_data, sizeof(usbtll_data), -				omap_uhhtll_latency, -				ARRAY_SIZE(omap_uhhtll_latency), false); +				 &usbtll_data, sizeof(usbtll_data));  	if (IS_ERR(pdev)) {  		pr_err("Could not build hwmod device %s\n",  		       USBHS_TLL_HWMODNAME); @@ -540,9 +530,7 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)  	}  	pdev = omap_device_build(OMAP_USBHS_DEVICE, bus_id, uhh_hwm, -				&usbhs_data, sizeof(usbhs_data), -				omap_uhhtll_latency, -				ARRAY_SIZE(omap_uhhtll_latency), false); +				&usbhs_data, sizeof(usbhs_data));  	if (IS_ERR(pdev)) 
{  		pr_err("Could not build hwmod devices %s\n",  		       USBHS_UHH_HWMODNAME); diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 7b33b375fe7..8c4de2708cf 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -102,7 +102,7 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)                  return;  	pdev = omap_device_build(name, bus_id, oh, &musb_plat, -			       sizeof(musb_plat), NULL, 0, false); +				 sizeof(musb_plat));  	if (IS_ERR(pdev)) {  		pr_err("Could not build omap_device for %s %s\n",  						name, oh_name); diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c index 7c2b4ed38f0..910243f54a0 100644 --- a/arch/arm/mach-omap2/wd_timer.c +++ b/arch/arm/mach-omap2/wd_timer.c @@ -124,8 +124,7 @@ static int __init omap_init_wdt(void)  	pdata.read_reset_sources = prm_read_reset_sources;  	pdev = omap_device_build(dev_name, id, oh, &pdata, -				 sizeof(struct omap_wd_timer_platform_data), -				 NULL, 0, 0); +				 sizeof(struct omap_wd_timer_platform_data));  	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",  	     dev_name, oh->name);  	return 0; diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h index d6b5073692d..44754230fdc 100644 --- a/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/arch/arm/mach-realview/include/mach/irqs-eb.h @@ -115,7 +115,7 @@  /*   * Only define NR_IRQS if less than NR_IRQS_EB   */ -#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96) +#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)  #if defined(CONFIG_MACH_REALVIEW_EB) \  	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 076c26d4386..dda3904dc64 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,  	if (is_coherent || nommu())  		addr = __alloc_simple_buffer(dev, size, gfp, &page); -	else if (gfp & GFP_ATOMIC) +	else if (!(gfp & __GFP_WAIT))  		addr = __alloc_from_pool(size, &page);  	else if (!IS_ENABLED(CONFIG_CMA))  		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 665870dce3c..1fbac364bd4 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -177,15 +177,6 @@ config OMAP3_L2_AUX_SECURE_SERVICE_SET_ID  	help  	  PPA routine service ID for setting L2 auxiliary control register. -config OMAP_32K_TIMER_HZ -	int "Kernel internal timer frequency for 32KHz timer" -	range 32 1024 -	depends on OMAP_32K_TIMER -	default "128" -	help -	  Kernel internal timer frequency should be a divisor of 32768, -	  such as 64 or 128. -  config OMAP_DM_TIMER  	bool "Use dual-mode timer"  	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS diff --git a/arch/arm/plat-omap/include/plat/timex.h b/arch/arm/plat-omap/include/plat/timex.h index 6d35767bc48..e27d2daa779 100644 --- a/arch/arm/plat-omap/include/plat/timex.h +++ b/arch/arm/plat-omap/include/plat/timex.h @@ -28,14 +28,6 @@  #if !defined(__ASM_ARCH_OMAP_TIMEX_H)  #define __ASM_ARCH_OMAP_TIMEX_H -/* - * OMAP 32KHz timer updates time one jiffie at a time from a secondary timer, - * and that's why the CLOCK_TICK_RATE is not 32768. 
- */ -#ifdef CONFIG_OMAP_32K_TIMER -#define CLOCK_TICK_RATE		(CONFIG_OMAP_32K_TIMER_HZ) -#else  #define CLOCK_TICK_RATE		(HZ * 100000UL) -#endif  #endif /* __ASM_ARCH_OMAP_TIMEX_H */ diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index aaf5199d8fc..b3d18f9f3e8 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)  #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif /* __ASM_AVR32_DMA_MAPPING_H */ diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index bbf461076a0..054d9ec57d9 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	_dma_sync((dma_addr_t)vaddr, size, dir);  } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif				/* _BLACKFIN_DMA_MAPPING_H */ diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 3c694065030..88bd0d899bd 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))  #define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h)) +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif	/* _ASM_C6X_DMA_MAPPING_H */ diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 8588b2ccf85..2f0f654f1b4 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  {  } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index dfb811002c6..1746a2b8e6e 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	flush_write_buffers();  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif  /* _ASM_DMA_MAPPING_H */ diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 3e6b8445af6..292805f0762 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)  #include <asm-generic/dma-mapping-broken.h>  #endif +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif  /* _M68K_DMA_MAPPING_H */ diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1e..a18abfc558e 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,  	mn10300_dcache_flush_inv();  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 467bbd510ea..106b395688e 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);  /* At the moment, we panic on error for IOMMU resource exaustion */  #define dma_mapping_error(dev, x)	0 +/* This API cannot be supported on PA-RISC */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 56585086413..7443481a315 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	sldi	r29,r5,SID_SHIFT - VPN_SHIFT  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 - -	/* 
Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */ -	xor	r28,r5,r0 +	/* +	 * Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) +	 */ +	rldicl	r0,r3,64-12,48 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */ @@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	/*  	 * calculate hash value for primary slot and  	 * store it in r28 for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */ +	rldicl	r0,r3,64-12,36 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ @@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	 */  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 - -	/* Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */ -	xor	r28,r5,r0 +	/* +	 * Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) +	 */ +	rldicl	r0,r3,64-12,48 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */ @@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	/*  	 * Calculate hash value for primary slot and  	 * store it in r28  for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ +	rldicl	r0,r3,64-12,36 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ @@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 -	/* Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */ -	xor	r28,r5,r0 +	/* Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) +	 */ +	rldicl	r0,r3,64-16,52 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */  	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)  	or	r29,r28,r29 -  	/*  	 * calculate hash value for primary slot and  	 * store it in r28 for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ +	rldicl	r0,r3,64-16,40 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 102ff7cb3e4..142c4ceff11 100644 --- a/arch/x86/ia32/ia32entry.S +++ 
b/arch/x86/ia32/ia32entry.S @@ -207,7 +207,7 @@ sysexit_from_sys_call:  	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)  	jnz ia32_ret_from_sys_call  	TRACE_IRQS_ON -	sti +	ENABLE_INTERRUPTS(CLBR_NONE)  	movl %eax,%esi		/* second arg, syscall return value */  	cmpl $-MAX_ERRNO,%eax	/* is it an error ? */  	jbe 1f @@ -217,7 +217,7 @@ sysexit_from_sys_call:  	call __audit_syscall_exit  	movq RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */  	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi -	cli +	DISABLE_INTERRUPTS(CLBR_NONE)  	TRACE_IRQS_OFF  	testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)  	jz \exit diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fe9edec6698..84c1309c4c0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -298,8 +298,7 @@ struct _cache_attr {  			 unsigned int);  }; -#ifdef CONFIG_AMD_NB - +#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)  /*   * L3 cache descriptors   */ @@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,  static struct _cache_attr subcaches =  	__ATTR(subcaches, 0644, show_subcaches, store_subcaches); -#else	/* CONFIG_AMD_NB */ +#else  #define amd_init_l3_cache(x, y) -#endif /* CONFIG_AMD_NB */ +#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */  static int  __cpuinit cpuid4_cache_lookup_regs(int index, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 93b9e1181f8..4914e94ad6e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)  		break;  	case 28: /* Atom */ -	case 54: /* Cedariew */ +	case 38: /* Lincroft */ +	case 39: /* Penwell */ +	case 53: /* Cloverview */ +	case 54: /* Cedarview */  		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,  		       sizeof(hw_cache_event_ids)); @@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)  		pr_cont("SandyBridge events, ");  		break;  	case 58: /* IvyBridge */ +	case 62: /* IvyBridge EP */  		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,  		       sizeof(hw_cache_event_ids));  		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index f2af39f5dc3..4820c232a0b 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =  }; -static __initconst u64 p6_hw_cache_event_ids +static u64 p6_hw_cache_event_ids  				[PERF_COUNT_HW_CACHE_MAX]  				[PERF_COUNT_HW_CACHE_OP_MAX]  				[PERF_COUNT_HW_CACHE_RESULT_MAX] = diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index cc2f8c13128..872eb60e780 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -55,7 +55,7 @@ static FILE		*input_file;	/* Input file name */  static void usage(const char *err)  {  	if (err) -		fprintf(stderr, "Error: %s\n\n", err); +		fprintf(stderr, "%s: Error: %s\n\n", prog, err);  	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);  	fprintf(stderr, "\t-y	64bit mode\n");  	fprintf(stderr, "\t-n	32bit mode\n"); @@ -269,7 +269,13 @@ int main(int argc, char **argv)  		insns++;  	} -	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? 
"given" : "random", errors, seed); +	fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", +		prog, +		(errors) ? "Failure" : "Success", +		insns, +		(input_file) ? "given" : "random", +		errors, +		seed);  	return errors ? 1 : 0;  } diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 4acb5feba1f..172a02a6ad1 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	consistent_sync(vaddr, size, direction);  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif	/* _XTENSA_DMA_MAPPING_H */ diff --git a/block/genhd.c b/block/genhd.c index 9a289d7c84b..3993ebf4135 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);  static struct device_type disk_type; +static void disk_check_events(struct disk_events *ev, +			      unsigned int *clearing_ptr);  static void disk_alloc_events(struct gendisk *disk);  static void disk_add_events(struct gendisk *disk);  static void disk_del_events(struct gendisk *disk); @@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)  	const struct block_device_operations *bdops = disk->fops;  	struct disk_events *ev = disk->ev;  	unsigned int pending; +	unsigned int clearing = mask;  	if (!ev) {  		/* for drivers still using the old ->media_changed method */ @@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)  		return 0;  	} -	/* tell the workfn about the events being cleared */ +	disk_block_events(disk); + +	/* +	 * store the union of mask and ev->clearing on the stack so that the +	 * race with disk_flush_events does not cause ambiguity (ev->clearing +	 * can still be modified even if events are blocked). +	 */  	spin_lock_irq(&ev->lock); -	ev->clearing |= mask; +	clearing |= ev->clearing; +	ev->clearing = 0;  	spin_unlock_irq(&ev->lock); -	/* uncondtionally schedule event check and wait for it to finish */ -	disk_block_events(disk); -	queue_delayed_work(system_freezable_wq, &ev->dwork, 0); -	flush_delayed_work(&ev->dwork); -	__disk_unblock_events(disk, false); +	disk_check_events(ev, &clearing); +	/* +	 * if ev->clearing is not 0, the disk_flush_events got called in the +	 * middle of this function, so we want to run the workfn without delay. +	 */ +	__disk_unblock_events(disk, ev->clearing ? true : false);  	/* then, fetch and clear pending events */  	spin_lock_irq(&ev->lock); -	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */  	pending = ev->pending & mask;  	ev->pending &= ~mask;  	spin_unlock_irq(&ev->lock); +	WARN_ON_ONCE(clearing & mask);  	return pending;  } +/* + * Separate this part out so that a different pointer for clearing_ptr can be + * passed in for disk_clear_events. 
+ */  static void disk_events_workfn(struct work_struct *work)  {  	struct delayed_work *dwork = to_delayed_work(work);  	struct disk_events *ev = container_of(dwork, struct disk_events, dwork); + +	disk_check_events(ev, &ev->clearing); +} + +static void disk_check_events(struct disk_events *ev, +			      unsigned int *clearing_ptr) +{  	struct gendisk *disk = ev->disk;  	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; -	unsigned int clearing = ev->clearing; +	unsigned int clearing = *clearing_ptr;  	unsigned int events;  	unsigned long intv;  	int nr_events = 0, i; @@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)  	events &= ~ev->pending;  	ev->pending |= events; -	ev->clearing &= ~clearing; +	*clearing_ptr &= ~clearing;  	intv = disk_events_poll_jiffies(disk);  	if (!ev->block && intv) diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 6a0955e6d4f..53ecac5a216 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -636,82 +636,82 @@ struct rx_buf_desc {  #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE    #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE   -typedef volatile u_int  freg_t; +typedef volatile u_int	ffreg_t;  typedef u_int   rreg_t;  typedef struct _ffredn_t { -        freg_t  idlehead_high;  /* Idle cell header (high)              */ -        freg_t  idlehead_low;   /* Idle cell header (low)               */ -        freg_t  maxrate;        /* Maximum rate                         */ -        freg_t  stparms;        /* Traffic Management Parameters        */ -        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */ -        freg_t  rm_type;        /*                                      */ -        u_int   filler5[0x17 - 0x06]; -        freg_t  cmd_reg;        /* Command register                     */ -        u_int   filler18[0x20 - 0x18]; -        freg_t  cbr_base;       /* CBR Pointer Base                     */ -        freg_t  vbr_base;       /* VBR Pointer Base                     */ -        freg_t  abr_base;       /* ABR Pointer Base                     */ -        freg_t  ubr_base;       /* UBR Pointer Base                     */ -        u_int   filler24; -        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */ -        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */ -        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */ -        freg_t  vct_base;       /* Main VC Table Base                   */ -        freg_t  vcte_base;      /* Extended Main VC Table Base          */ -        u_int   filler2a[0x2C - 0x2A]; -        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */ -        freg_t  cbr_tab_end;    /* CBR Table End                        */ -        freg_t  cbr_pointer;    /* CBR Pointer                          */ -        u_int   filler2f[0x30 - 0x2F]; -        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */ -        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */ -        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */ -        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */ -        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/ -        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */ -        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */ -        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/ -        u_int   filler38[0x40 - 0x38]; -        freg_t  
queue_base;     /* Base address for PRQ and TCQ         */ -        freg_t  desc_base;      /* Base address of descriptor table     */ -        u_int   filler42[0x45 - 0x42]; -        freg_t  mode_reg_0;     /* Mode register 0                      */ -        freg_t  mode_reg_1;     /* Mode register 1                      */ -        freg_t  intr_status_reg;/* Interrupt Status register            */ -        freg_t  mask_reg;       /* Mask Register                        */ -        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */ -        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */ -        freg_t  state_reg;      /* Status register                      */ -        u_int   filler4c[0x58 - 0x4c]; -        freg_t  curr_desc_num;  /* Contains the current descriptor num  */ -        freg_t  next_desc;      /* Next descriptor                      */ -        freg_t  next_vc;        /* Next VC                              */ -        u_int   filler5b[0x5d - 0x5b]; -        freg_t  present_slot_cnt;/* Present slot count                  */ -        u_int   filler5e[0x6a - 0x5e]; -        freg_t  new_desc_num;   /* New descriptor number                */ -        freg_t  new_vc;         /* New VC                               */ -        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */ -        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */ -        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */ -        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */ -        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */ -        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */ -        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */ -        freg_t  cbr_vc;         /* CBR VC                               */ -        freg_t  vbr_sb_vc;      /* VBR SB VC                            */ -        freg_t  abr_sb_vc;      /* ABR SB VC                            */ -        freg_t  ubr_sb_vc;      /* UBR SB VC                            */ -        freg_t  vbr_next_link;  /* VBR next link                        */ -        freg_t  abr_next_link;  /* ABR next link                        */ -        freg_t  ubr_next_link;  /* UBR next link                        */ -        u_int   filler7a[0x7c-0x7a]; -        freg_t  out_rate_head;  /* Out of rate head                     */ -        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */ -        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */ -        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */ -        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */ +	ffreg_t	idlehead_high;	/* Idle cell header (high)		*/ +	ffreg_t	idlehead_low;	/* Idle cell header (low)		*/ +	ffreg_t	maxrate;	/* Maximum rate				*/ +	ffreg_t	stparms;	/* Traffic Management Parameters	*/ +	ffreg_t	abrubr_abr;	/* ABRUBR Priority Byte 1, TCR Byte 0	*/ +	ffreg_t	rm_type;	/*					*/ +	u_int	filler5[0x17 - 0x06]; +	ffreg_t	cmd_reg;	/* Command register			*/ +	u_int	filler18[0x20 - 0x18]; +	ffreg_t	cbr_base;	/* CBR Pointer Base			*/ +	ffreg_t	vbr_base;	/* VBR Pointer Base			*/ +	ffreg_t	abr_base;	/* ABR Pointer Base			*/ +	ffreg_t	ubr_base;	/* UBR Pointer Base			*/ +	u_int	filler24; +	ffreg_t	vbrwq_base;	/* VBR Wait Queue Base			*/ +	ffreg_t	abrwq_base;	/* ABR Wait Queue Base			*/ +	ffreg_t	ubrwq_base;	/* UBR Wait Queue Base			*/ +	ffreg_t	vct_base;	/* Main VC Table 
Base			*/ +	ffreg_t	vcte_base;	/* Extended Main VC Table Base		*/ +	u_int	filler2a[0x2C - 0x2A]; +	ffreg_t	cbr_tab_beg;	/* CBR Table Begin			*/ +	ffreg_t	cbr_tab_end;	/* CBR Table End			*/ +	ffreg_t	cbr_pointer;	/* CBR Pointer				*/ +	u_int	filler2f[0x30 - 0x2F]; +	ffreg_t	prq_st_adr;	/* Packet Ready Queue Start Address	*/ +	ffreg_t	prq_ed_adr;	/* Packet Ready Queue End Address	*/ +	ffreg_t	prq_rd_ptr;	/* Packet Ready Queue read pointer	*/ +	ffreg_t	prq_wr_ptr;	/* Packet Ready Queue write pointer	*/ +	ffreg_t	tcq_st_adr;	/* Transmit Complete Queue Start Address*/ +	ffreg_t	tcq_ed_adr;	/* Transmit Complete Queue End Address	*/ +	ffreg_t	tcq_rd_ptr;	/* Transmit Complete Queue read pointer */ +	ffreg_t	tcq_wr_ptr;	/* Transmit Complete Queue write pointer*/ +	u_int	filler38[0x40 - 0x38]; +	ffreg_t	queue_base;	/* Base address for PRQ and TCQ		*/ +	ffreg_t	desc_base;	/* Base address of descriptor table	*/ +	u_int	filler42[0x45 - 0x42]; +	ffreg_t	mode_reg_0;	/* Mode register 0			*/ +	ffreg_t	mode_reg_1;	/* Mode register 1			*/ +	ffreg_t	intr_status_reg;/* Interrupt Status register		*/ +	ffreg_t	mask_reg;	/* Mask Register			*/ +	ffreg_t	cell_ctr_high1; /* Total cell transfer count (high)	*/ +	ffreg_t	cell_ctr_lo1;	/* Total cell transfer count (low)	*/ +	ffreg_t	state_reg;	/* Status register			*/ +	u_int	filler4c[0x58 - 0x4c]; +	ffreg_t	curr_desc_num;	/* Contains the current descriptor num	*/ +	ffreg_t	next_desc;	/* Next descriptor			*/ +	ffreg_t	next_vc;	/* Next VC				*/ +	u_int	filler5b[0x5d - 0x5b]; +	ffreg_t	present_slot_cnt;/* Present slot count			*/ +	u_int	filler5e[0x6a - 0x5e]; +	ffreg_t	new_desc_num;	/* New descriptor number		*/ +	ffreg_t	new_vc;		/* New VC				*/ +	ffreg_t	sched_tbl_ptr;	/* Schedule table pointer		*/ +	ffreg_t	vbrwq_wptr;	/* VBR wait queue write pointer		*/ +	ffreg_t	vbrwq_rptr;	/* VBR wait queue read pointer		*/ +	ffreg_t	abrwq_wptr;	/* ABR wait queue write pointer		*/ +	ffreg_t	abrwq_rptr;	/* ABR wait queue read pointer		*/ +	ffreg_t	ubrwq_wptr;	/* UBR wait queue write pointer		*/ +	ffreg_t	ubrwq_rptr;	/* UBR wait queue read pointer		*/ +	ffreg_t	cbr_vc;		/* CBR VC				*/ +	ffreg_t	vbr_sb_vc;	/* VBR SB VC				*/ +	ffreg_t	abr_sb_vc;	/* ABR SB VC				*/ +	ffreg_t	ubr_sb_vc;	/* UBR SB VC				*/ +	ffreg_t	vbr_next_link;	/* VBR next link			*/ +	ffreg_t	abr_next_link;	/* ABR next link			*/ +	ffreg_t	ubr_next_link;	/* UBR next link			*/ +	u_int	filler7a[0x7c-0x7a]; +	ffreg_t	out_rate_head;	/* Out of rate head			*/ +	u_int	filler7d[0xca-0x7d]; /* pad out to full address space	*/ +	ffreg_t	cell_ctr_high1_nc;/* Total cell transfer count (high)	*/ +	ffreg_t	cell_ctr_lo1_nc;/* Total cell transfer count (low)	*/ +	u_int	fillercc[0x100-0xcc]; /* pad out to full address space	 */  } ffredn_t;  typedef struct _rfredn_t { diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 19e3fbfd575..cb0c4548857 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);  #ifdef CONFIG_BCMA_DRIVER_GPIO  /* driver_gpio.c */  int bcma_gpio_init(struct bcma_drv_cc *cc); +int bcma_gpio_unregister(struct bcma_drv_cc *cc);  #else  static inline int bcma_gpio_init(struct bcma_drv_cc *cc)  {  	return -ENOTSUPP;  } +static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ +	return 0; +}  #endif /* CONFIG_BCMA_DRIVER_GPIO */  #endif diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c index dbda91e4dff..1f0b83e18f6 100644 --- 
a/drivers/bcma/driver_chipcommon_nflash.c +++ b/drivers/bcma/driver_chipcommon_nflash.c @@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)  	struct bcma_bus *bus = cc->core->bus;  	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && -	    cc->core->id.rev != 0x38) { +	    cc->core->id.rev != 38) {  		bcma_err(bus, "NAND flash on unsupported board!\n");  		return -ENOTSUPP;  	} diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 9a6f585da2d..71f755c06fc 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)  	return gpiochip_add(chip);  } + +int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ +	return gpiochip_remove(&cc->gpio); +} diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 4a92f647b58..324f9debda8 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)  void bcma_bus_unregister(struct bcma_bus *bus)  {  	struct bcma_device *cores[3]; +	int err; + +	err = bcma_gpio_unregister(&bus->drv_cc); +	if (err == -EBUSY) +		bcma_err(bus, "Some GPIOs are still in use.\n"); +	else if (err) +		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);  	cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);  	cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index f58a4a4b4df..2b8303ad63c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {  }  /* must hold resource->req_lock */ -static void start_new_tl_epoch(struct drbd_tconn *tconn) +void start_new_tl_epoch(struct drbd_tconn *tconn)  {  	/* no point closing an epoch, if it is empty, anyways. 
*/  	if (tconn->current_tle_writes == 0) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 016de6b8bb5..c08d22964d0 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -267,6 +267,7 @@ struct bio_and_error {  	int error;  }; +extern void start_new_tl_epoch(struct drbd_tconn *tconn);  extern void drbd_req_destroy(struct kref *kref);  extern void _req_may_be_done(struct drbd_request *req,  		struct bio_and_error *m); diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 53bf6182bac..0fe220cfb9e 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,  	enum drbd_state_rv rv = SS_SUCCESS;  	enum sanitize_state_warnings ssw;  	struct after_state_chg_work *ascw; +	bool did_remote, should_do_remote;  	os = drbd_read_state(mdev); @@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,  	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))  		atomic_inc(&mdev->local_cnt); +	did_remote = drbd_should_do_remote(mdev->state);  	mdev->state.i = ns.i; +	should_do_remote = drbd_should_do_remote(mdev->state);  	mdev->tconn->susp = ns.susp;  	mdev->tconn->susp_nod = ns.susp_nod;  	mdev->tconn->susp_fen = ns.susp_fen; +	/* put replicated vs not-replicated requests in seperate epochs */ +	if (did_remote != should_do_remote) +		start_new_tl_epoch(mdev->tconn); +  	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)  		drbd_print_uuids(mdev, "attached to UUIDs"); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 9694dd99bbb..3fd10099045 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)  		}  	} -	if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { +	if (cmdto_cnt) {  		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); - -		mtip_restart_port(port); +		if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { +			mtip_restart_port(port); +			wake_up_interruptible(&port->svc_wait); +		}  		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); -		wake_up_interruptible(&port->svc_wait);  	}  	if (port->ic_pause_timer) { @@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)  	 * Delete our gendisk structure. This also removes the device  	 * from /dev  	 */ -	del_gendisk(dd->disk); +	if (dd->disk) { +		if (dd->disk->queue) +			del_gendisk(dd->disk); +		else +			put_disk(dd->disk); +	}  	spin_lock(&rssd_index_lock);  	ida_remove(&rssd_index_ida, dd->index); @@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)  		"Shutting down %s ...\n", dd->disk->disk_name);  	/* Delete our gendisk structure, and cleanup the blk queue. 
*/ -	del_gendisk(dd->disk); +	if (dd->disk) { +		if (dd->disk->queue) +			del_gendisk(dd->disk); +		else +			put_disk(dd->disk); +	} +  	spin_lock(&rssd_index_lock);  	ida_remove(&rssd_index_ida, dd->index); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 74374fb762a..5ac841ff6cc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,  static void make_response(struct xen_blkif *blkif, u64 id,  			  unsigned short op, int st); -#define foreach_grant(pos, rbtree, node) \ -	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ +#define foreach_grant_safe(pos, n, rbtree, node) \ +	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ +	     (n) = rb_next(&(pos)->node); \  	     &(pos)->node != NULL; \ -	     (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) +	     (pos) = container_of(n, typeof(*(pos)), node), \ +	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)  static void add_persistent_gnt(struct rb_root *root, @@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];  	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  	struct persistent_gnt *persistent_gnt; +	struct rb_node *n;  	int ret = 0;  	int segs_to_unmap = 0; -	foreach_grant(persistent_gnt, root, node) { +	foreach_grant_safe(persistent_gnt, n, root, node) {  		BUG_ON(persistent_gnt->handle ==  			BLKBACK_INVALID_HANDLE);  		gnttab_set_unmap_op(&unmap[segs_to_unmap], @@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  			persistent_gnt->handle);  		pages[segs_to_unmap] = persistent_gnt->page; -		rb_erase(&persistent_gnt->node, root); -		kfree(persistent_gnt); -		num--;  		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||  			!rb_next(&persistent_gnt->node)) { @@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  			BUG_ON(ret);  			segs_to_unmap = 0;  		} + +		rb_erase(&persistent_gnt->node, root); +		kfree(persistent_gnt); +		num--;  	}  	BUG_ON(num != 0);  } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 96e9b00db08..11043c18ac5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  {  	struct llist_node *all_gnts;  	struct grant *persistent_gnt; +	struct llist_node *n;  	/* Prevent new requests being issued until we fix things up. 
*/  	spin_lock_irq(&info->io_lock); @@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  	/* Remove all persistent grants */  	if (info->persistent_gnts_c) {  		all_gnts = llist_del_all(&info->persistent_gnts); -		llist_for_each_entry(persistent_gnt, all_gnts, node) { +		llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {  			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);  			__free_page(pfn_to_page(persistent_gnt->pfn));  			kfree(persistent_gnt); @@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  			     struct blkif_response *bret)  { -	int i; +	int i = 0;  	struct bio_vec *bvec;  	struct req_iterator iter;  	unsigned long flags; @@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  		 */  		rq_for_each_segment(bvec, s->request, iter) {  			BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); -			i = offset >> PAGE_SHIFT; +			if (bvec->bv_offset < offset) +				i++;  			BUG_ON(i >= s->req.u.rw.nr_segments);  			shared_data = kmap_atomic(  				pfn_to_page(s->grants_used[i]->pfn)); @@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  				bvec->bv_len);  			bvec_kunmap_irq(bvec_data, &flags);  			kunmap_atomic(shared_data); -			offset += bvec->bv_len; +			offset = bvec->bv_offset + bvec->bv_len;  		}  	}  	/* Add the persistent grant into the list of free grants */ diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 684b0d53764..ee4dbeafb37 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)  	/* Disable interrupts for vqs */  	vdev->config->reset(vdev);  	/* Finish up work that's lined up */ -	cancel_work_sync(&portdev->control_work); +	if (use_multiport(portdev)) +		cancel_work_sync(&portdev->control_work);  	list_for_each_entry_safe(port, port2, &portdev->ports, list)  		unplug_port(port); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4d0e60adbc6..a2d478e8692 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav  				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {  					radeon_wait_for_vblank(rdev, i);  					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  				}  			} else {  				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);  				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {  					radeon_wait_for_vblank(rdev, i);  					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  				}  			}  			/* wait for the next frame */ @@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav  		blackout &= ~BLACKOUT_MODE_MASK;  		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);  	} +	/* wait for the MC to settle */ +	udelay(100);  }  void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) 
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s  			if (ASIC_IS_DCE6(rdev)) {  				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);  				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  			} else {  				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);  				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  			}  			/* wait for the next frame */  			frame_count = radeon_get_vblank_counter(rdev, i); @@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)  	WREG32(HDP_ADDR_CONFIG, gb_addr_config);  	WREG32(DMA_TILING_CONFIG, gb_addr_config); -	tmp = gb_addr_config & NUM_PIPES_MASK; -	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, -					EVERGREEN_MAX_BACKENDS, disabled_rb_mask); +	if ((rdev->config.evergreen.max_backends == 1) && +	    (rdev->flags & RADEON_IS_IGP)) { +		if ((disabled_rb_mask & 3) == 1) { +			/* RB0 disabled, RB1 enabled */ +			tmp = 0x11111111; +		} else { +			/* RB1 disabled, RB0 enabled */ +			tmp = 0x00000000; +		} +	} else { +		tmp = gb_addr_config & NUM_PIPES_MASK; +		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, +						EVERGREEN_MAX_BACKENDS, disabled_rb_mask); +	}  	WREG32(GB_BACKEND_MAP, tmp);  	WREG32(CGTS_SYS_TCC_DISABLE, 0); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index bc2540b17c5..becb03e8b32 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,  			      u32 disabled_rb_mask)  {  	u32 rendering_pipe_num, rb_num_width, req_rb_num; -	u32 pipe_rb_ratio, pipe_rb_remain; +	u32 pipe_rb_ratio, pipe_rb_remain, tmp;  	u32 data = 0, mask = 1 << (max_rb_num - 1);  	unsigned i, j;  	/* mask out the RBs that don't exist on that asic */ -	disabled_rb_mask |= (0xff << max_rb_num) & 0xff; +	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); +	/* make sure at least one RB is available */ +	if ((tmp & 0xff) != 0xff) +		disabled_rb_mask = tmp;  	rendering_pipe_num = 1 << tiling_pipe_num;  	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 9056fafb00e..0b202c07fe5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {  	.vm = {  		.init = &cayman_vm_init,  		.fini = &cayman_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &cayman_vm_set_page,  	},  	.ring = { @@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {  	.vm = {  		.init = &cayman_vm_init,  		.fini = &cayman_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &cayman_vm_set_page,  	},  	.ring = { @@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {  	.vm = {  		.init = &si_vm_init,  		.fini = &si_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = 
RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &si_vm_set_page,  	},  	.ring = { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 33a56a09ff1..3e403bdda58 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)  								   1),  								  ATOM_DEVICE_CRT1_SUPPORT);  				} +				/* RV100 board with external TDMS bit mis-set. +				 * Actually uses internal TMDS, clear the bit. +				 */ +				if (dev->pdev->device == 0x5159 && +				    dev->pdev->subsystem_vendor == 0x1014 && +				    dev->pdev->subsystem_device == 0x029A) { +					tmp &= ~(1 << 4); +				}  				if ((tmp >> 4) & 0x1) {  					devices |= ATOM_DEVICE_DFP2_SUPPORT;  					radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ff3def78461..05c96fa0b05 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,  	}  	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); -	if (radeon_fb == NULL) +	if (radeon_fb == NULL) { +		drm_gem_object_unreference_unlocked(obj);  		return ERR_PTR(-ENOMEM); +	}  	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);  	if (ret) { diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 2430d80b187..cd72062d5a9 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi  {  	int r; +	/* make sure we aren't trying to allocate more space than there is on the ring */ +	if (ndw > (ring->ring_size / 4)) +		return -ENOMEM;  	/* Align requested size with padding so unlock_commit can  	 * pad safely */  	ndw = (ndw + ring->align_mask) & ~ring->align_mask; diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0f656b111c1..a072fa8c46b 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -1,5 +1,6 @@  cayman 0x9400  0x0000802C GRBM_GFX_INDEX +0x00008040 WAIT_UNTIL  0x000084FC CP_STRMOUT_CNTL  0x000085F0 CP_COHER_CNTL  0x000085F4 CP_COHER_SIZE diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 2bb6d0e84b3..435ed355136 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)  				WREG32(R600_CITF_CNTL, blackout);  		}  	} +	/* wait for the MC to settle */ +	udelay(100);  }  void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 44420fca7df..8be35c809c7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,  	struct ttm_bo_device *bdev = bo->bdev;  	struct ttm_bo_driver *driver = bdev->driver; -	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); +	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);  	if (!fbo)  		return -ENOMEM; @@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,  	fbo->vm_node = NULL;  	atomic_set(&fbo->cpu_writers, 0); -	fbo->sync_obj = 
driver->sync_obj_ref(bo->sync_obj); +	spin_lock(&bdev->fence_lock); +	if (bo->sync_obj) +		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); +	else +		fbo->sync_obj = NULL; +	spin_unlock(&bdev->fence_lock);  	kref_init(&fbo->list_kref);  	kref_init(&fbo->kref);  	fbo->destroy = &ttm_transfered_destroy; @@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,  		 */  		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - -		/* ttm_buffer_object_transfer accesses bo->sync_obj */ -		ret = ttm_buffer_object_transfer(bo, &ghost_obj);  		spin_unlock(&bdev->fence_lock);  		if (tmp_obj)  			driver->sync_obj_unref(&tmp_obj); +		ret = ttm_buffer_object_transfer(bo, &ghost_obj);  		if (ret)  			return ret; diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4850d03870c..35275099caf 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)  		struct qib_qp __rcu **qpp;  		qpp = &dev->qp_table[n]; -		q = rcu_dereference_protected(*qpp, -			lockdep_is_held(&dev->qpt_lock)); -		for (; q; qpp = &q->next) { +		for (; (q = rcu_dereference_protected(*qpp, +				lockdep_is_held(&dev->qpt_lock))) != NULL; +				qpp = &q->next)  			if (q == qp) {  				atomic_dec(&qp->refcount);  				*qpp = qp->next;  				rcu_assign_pointer(qp->next, NULL); -				q = rcu_dereference_protected(*qpp, -					lockdep_is_held(&dev->qpt_lock));  				break;  			} -			q = rcu_dereference_protected(*qpp, -				lockdep_is_held(&dev->qpt_lock)); -		}  	}  	spin_unlock_irqrestore(&dev->qpt_lock, flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 03103d2bd64..67b0c1d2367 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_  	tx_req->mapping = addr; +	skb_orphan(skb); +	skb_dst_drop(skb); +  	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),  		       addr, skb->len);  	if (unlikely(rc)) { @@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_  		dev->trans_start = jiffies;  		++tx->tx_head; -		skb_orphan(skb); -		skb_dst_drop(skb); -  		if (++priv->tx_outstanding == ipoib_sendq_size) {  			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",  				  tx->qp->qp_num); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1bca70e20a..2cfa76f5d99 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,  		netif_stop_queue(dev);  	} +	skb_orphan(skb); +	skb_dst_drop(skb); +  	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),  		       address->ah, qpn, tx_req, phead, hlen);  	if (unlikely(rc)) { @@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,  		address->last_send = priv->tx_head;  		++priv->tx_head; - -		skb_orphan(skb); -		skb_dst_drop(skb);  	}  	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index e4aaee91201..e7f5da0296f 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1338,28 +1338,15 @@ static int isp_enable_clocks(struct isp_device *isp)  { 
 	int r;  	unsigned long rate; -	int divisor; - -	/* -	 * cam_mclk clock chain: -	 *   dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk -	 * -	 * In OMAP3630 dpll4_m5x2 != 2 x dpll4_m5 but both are -	 * set to the same value. Hence the rate set for dpll4_m5 -	 * has to be twice of what is set on OMAP3430 to get -	 * the required value for cam_mclk -	 */ -	divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;  	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);  	if (r) {  		dev_err(isp->dev, "failed to enable cam_ick clock\n");  		goto out_clk_enable_ick;  	} -	r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK], -			 CM_CAM_MCLK_HZ/divisor); +	r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);  	if (r) { -		dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n"); +		dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");  		goto out_clk_enable_mclk;  	}  	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]); @@ -1401,7 +1388,6 @@ static void isp_disable_clocks(struct isp_device *isp)  static const char *isp_clocks[] = {  	"cam_ick",  	"cam_mclk", -	"dpll4_m5_ck",  	"csi2_96m_fck",  	"l3_ick",  }; diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h index 517d348ce32..c77e1f2ae5c 100644 --- a/drivers/media/platform/omap3isp/isp.h +++ b/drivers/media/platform/omap3isp/isp.h @@ -147,7 +147,6 @@ struct isp_platform_callback {   * @ref_count: Reference count for handling multiple ISP requests.   * @cam_ick: Pointer to camera interface clock structure.   * @cam_mclk: Pointer to camera functional clock structure. - * @dpll4_m5_ck: Pointer to DPLL4 M5 clock structure.   * @csi2_fck: Pointer to camera CSI2 complexIO clock structure.   * @l3_ick: Pointer to OMAP3 L3 bus interface clock.   * @irq: Currently attached ISP ISR callbacks information structure. @@ -189,10 +188,9 @@ struct isp_device {  	u32 xclk_divisor[2];	/* Two clocks, a and b. 
*/  #define ISP_CLK_CAM_ICK		0  #define ISP_CLK_CAM_MCLK	1 -#define ISP_CLK_DPLL4_M5_CK	2 -#define ISP_CLK_CSI2_FCK	3 -#define ISP_CLK_L3_ICK		4 -	struct clk *clock[5]; +#define ISP_CLK_CSI2_FCK	2 +#define ISP_CLK_L3_ICK		3 +	struct clk *clock[4];  	/* ISP modules */  	struct ispstat isp_af; diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index e10e525f33e..296941a9ae2 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,  	radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;  	radio->vdev.lock = &radio->lock;  	radio->vdev.release = video_device_release_empty; +	radio->vdev.vfl_dir = VFL_DIR_TX;  	radio->usbdev = interface_to_usbdev(intf);  	radio->intf = intf; diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index a082e400ed0..1507c9d508d 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c @@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {  	.name			= "radio-si4713",  	.release		= video_device_release,  	.ioctl_ops		= &radio_si4713_ioctl_ops, +	.vfl_dir		= VFL_DIR_TX,  };  /* Platform driver interface */ diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index c48be195bba..cabbe3adf43 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {  	.ioctl_ops		= &wl1273_ioctl_ops,  	.name			= WL1273_FM_DRIVER_NAME,  	.release		= wl1273_vdev_release, +	.vfl_dir		= VFL_DIR_TX,  };  static int wl1273_fm_radio_remove(struct platform_device *pdev) diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 048de453603..0a8ee8fab92 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {  	.ioctl_ops = &fm_drv_ioctl_ops,  	.name = FM_DRV_NAME,  	.release = video_device_release, +	/* +	 * To ensure both the tuner and modulator ioctls are accessible we +	 * set the vfl_dir to M2M to indicate this. +	 * +	 * It is not really a mem2mem device of course, but it can both receive +	 * and transmit using the same radio device. It's the only radio driver +	 * that does this and it should really be split in two radio devices, +	 * but that would affect applications using this driver. +	 */ +	.vfl_dir = VFL_DIR_M2M,  };  int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 27f80cd8aef..46dcb54c32e 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -272,6 +272,7 @@ config MTD_DOCG3  	tristate "M-Systems Disk-On-Chip G3"  	select BCH  	select BCH_CONST_PARAMS +	select BITREVERSE  	---help---  	  This provides an MTD device driver for the M-Systems DiskOnChip  	  G3 devices. 
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 67cc73c18dd..7901d72c924 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)  	resource_size_t res_size;  	struct mtd_part_parser_data ppdata;  	bool map_indirect; -	const char *mtd_name; +	const char *mtd_name = NULL;  	match = of_match_device(of_flash_match, &dev->dev);  	if (!match) diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c index 86c9a79b89b..595de4012e7 100644 --- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c @@ -17,8 +17,8 @@  #include "bcm47xxnflash.h"  /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has - * shown 164 retries as maxiumum. */ -#define NFLASH_READY_RETRIES		1000 + * shown ~1000 retries as maxiumum. */ +#define NFLASH_READY_RETRIES		10000  #define NFLASH_SECTOR_SIZE		512 diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 3502606f648..feae55c7b88 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {  static const struct of_device_id davinci_nand_of_match[] = {  	{.compatible = "ti,davinci-nand", },  	{}, -} +};  MODULE_DEVICE_TABLE(of, davinci_nand_of_match);  static struct davinci_nand_pdata diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8323ac991ad..3766682a028 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  	int i;  	int val; -	/* ONFI need to be probed in 8 bits mode */ -	WARN_ON(chip->options & NAND_BUSWIDTH_16); +	/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */ +	if (chip->options & NAND_BUSWIDTH_16) { +		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); +		return 0; +	}  	/* Try ONFI for unknown chip or LP */  	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);  	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1877ed7ca08..1c9e09fbdff 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,  		pr_info("%s: Setting primary slave to None.\n",  			bond->dev->name);  		bond->primary_slave = NULL; +		memset(bond->params.primary, 0, sizeof(bond->params.primary));  		bond_select_active_slave(bond);  		goto out;  	} diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 58607f196c9..2282b1ae976 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,  	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),  			IFX_WRITE_LOW_16BIT(mask)); + +	/* According to C_CAN documentation, the reserved bit +	 * in IFx_MASK2 register is fixed 1 +	 */  	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), -			IFX_WRITE_HIGH_16BIT(mask)); +			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));  	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),  			IFX_WRITE_LOW_16BIT(id)); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 
4eba17b83ba..f1b3df167ff 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -36,13 +36,13 @@  #define DRV_VER			"4.4.161.0u"  #define DRV_NAME		"be2net" -#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC" -#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC" -#define OC_NAME			"Emulex OneConnect 10Gbps NIC" +#define BE_NAME			"Emulex BladeEngine2" +#define BE3_NAME		"Emulex BladeEngine3" +#define OC_NAME			"Emulex OneConnect"  #define OC_NAME_BE		OC_NAME	"(be3)"  #define OC_NAME_LANCER		OC_NAME "(Lancer)"  #define OC_NAME_SH		OC_NAME "(Skyhawk)" -#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver" +#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"  #define BE_VENDOR_ID 		0x19a2  #define EMULEX_VENDOR_ID	0x10df diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5c995700e53..4d6f3c54427 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -25,7 +25,7 @@  MODULE_VERSION(DRV_VER);  MODULE_DEVICE_TABLE(pci, be_dev_ids);  MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); -MODULE_AUTHOR("ServerEngines Corporation"); +MODULE_AUTHOR("Emulex Corporation");  MODULE_LICENSE("GPL");  static unsigned int num_vfs; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 02a12b69555..4dab6fc265a 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -232,6 +232,7 @@  #define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */  #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */  #define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE     0x00080000  /* Memory Error Handling Enable */  #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */  #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */  #define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */ @@ -389,6 +390,12 @@  #define E1000_PBS_16K E1000_PBA_16K +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT	8 +#define E1000_PBECCSTS_ECC_ENABLE		0x00010000 +  #define IFS_MAX       80  #define IFS_MIN       40  #define IFS_RATIO     4 @@ -408,6 +415,7 @@  #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */  #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */  #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */  #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */  #define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */  #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */ @@ -443,6 +451,7 @@  #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */  #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. 
threshold */  #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */ +#define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */  #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */  #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */  #define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6782a2eea1b..7e95f221d60 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -309,6 +309,8 @@ struct e1000_adapter {  	struct napi_struct napi; +	unsigned int uncorr_errors;	/* uncorrectable ECC errors */ +	unsigned int corr_errors;	/* correctable ECC errors */  	unsigned int restart_queue;  	u32 txd_cmd; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f95bc6ee1c2..fd4772a2691 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {  	E1000_STAT("dropped_smbus", stats.mgpdc),  	E1000_STAT("rx_dma_failed", rx_dma_failed),  	E1000_STAT("tx_dma_failed", tx_dma_failed), +	E1000_STAT("uncorr_ecc_errors", uncorr_errors), +	E1000_STAT("corr_ecc_errors", corr_errors),  };  #define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index cf217777586..b88676ff3d8 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -77,6 +77,7 @@ enum e1e_registers {  #define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */  	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */  	E1000_PBS      = 0x01008, /* Packet Buffer Size */ +	E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */  	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */  	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */  	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 97633654760..24d9f61956f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)  	if (hw->mac.type == e1000_ich8lan)  		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);  	ew32(RFCTL, reg); + +	/* Enable ECC on Lynxpoint */ +	if (hw->mac.type == e1000_pch_lpt) { +		reg = er32(PBECCSTS); +		reg |= E1000_PBECCSTS_ECC_ENABLE; +		ew32(PBECCSTS, reg); + +		reg = er32(CTRL); +		reg |= E1000_CTRL_MEHE; +		ew32(CTRL, reg); +	}  }  /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fbf75fdca99..643c883dd79 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} +	/* Reset on uncorrectable ECC error */ +	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { +		u32 pbeccsts = er32(PBECCSTS); + +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + +		/* Do the reset outside of interrupt context */ +		schedule_work(&adapter->reset_task); + +		/* return immediately since reset is imminent */ +		return IRQ_HANDLED; +	} +  	if (napi_schedule_prep(&adapter->napi)) {  		adapter->total_tx_bytes = 0;  		adapter->total_tx_packets = 0; @@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} +	/* Reset on uncorrectable ECC error */ +	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { +		u32 pbeccsts = er32(PBECCSTS); + +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + +		/* Do the reset outside of interrupt context */ +		schedule_work(&adapter->reset_task); + +		/* return immediately since reset is imminent */ +		return IRQ_HANDLED; +	} +  	if (napi_schedule_prep(&adapter->napi)) {  		adapter->total_tx_bytes = 0;  		adapter->total_tx_packets = 0; @@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)  	if (adapter->msix_entries) {  		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);  		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); +	} else if (hw->mac.type == e1000_pch_lpt) { +		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);  	} else {  		ew32(IMS, IMS_ENABLE_MASK);  	} @@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)  	adapter->stats.mgptc += er32(MGTPTC);  	adapter->stats.mgprc += er32(MGTPRC);  	adapter->stats.mgpdc += er32(MGTPDC); + +	/* Correctable ECC Errors */ +	if (hw->mac.type == e1000_pch_lpt) { +		u32 pbeccsts = er32(PBECCSTS); +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; +	}  }  /** diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a6542d75374..5163af31499 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)  		}  	} -	if ((dev_cap->flags & +	if ((dev->caps.flags &  	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&  	    mlx4_is_master(dev))  		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 7992b3e05d3..78ace59efd2 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)  					 rp->tx_skbuff[entry]->len,  					 PCI_DMA_TODEVICE);  		} -		dev_kfree_skb_irq(rp->tx_skbuff[entry]); +		dev_kfree_skb(rp->tx_skbuff[entry]);  		rp->tx_skbuff[entry] = NULL;  		entry = (++rp->dirty_tx) % TX_RING_SIZE;  	} @@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)  	if (intr_status & IntrPCIErr)  		netif_warn(rp, hw, dev, "PCI error\n"); -	napi_disable(&rp->napi); -	rhine_irq_disable(rp); -	/* Slow and safe. Consider __napi_schedule as a replacement ? 
*/ -	napi_enable(&rp->napi); -	napi_schedule(&rp->napi); +	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);  out_unlock:  	mutex_unlock(&rp->task_lock); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cc09b67c23b..2917a86f4c4 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)  }  static void tun_flow_update(struct tun_struct *tun, u32 rxhash, -			    u16 queue_index) +			    struct tun_file *tfile)  {  	struct hlist_head *head;  	struct tun_flow_entry *e;  	unsigned long delay = tun->ageing_time; +	u16 queue_index = tfile->queue_index;  	if (!rxhash)  		return; @@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,  	rcu_read_lock(); -	if (tun->numqueues == 1) +	/* We may get a very small possibility of OOO during switching, not +	 * worth to optimize.*/ +	if (tun->numqueues == 1 || tfile->detached)  		goto unlock;  	e = tun_flow_find(head, rxhash); @@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)  	tun = rtnl_dereference(tfile->tun); -	if (tun) { +	if (tun && !tfile->detached) {  		u16 index = tfile->queue_index;  		BUG_ON(index >= tun->numqueues);  		dev = tun->dev;  		rcu_assign_pointer(tun->tfiles[index],  				   tun->tfiles[tun->numqueues - 1]); -		rcu_assign_pointer(tfile->tun, NULL);  		ntfile = rtnl_dereference(tun->tfiles[index]);  		ntfile->queue_index = index;  		--tun->numqueues; -		if (clean) +		if (clean) { +			rcu_assign_pointer(tfile->tun, NULL);  			sock_put(&tfile->sk); -		else +		} else  			tun_disable_queue(tun, tfile);  		synchronize_net(); @@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)  	}  	if (clean) { -		if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && -		    !(tun->flags & TUN_PERSIST)) -			if (tun->dev->reg_state == NETREG_REGISTERED) +		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { +			netif_carrier_off(tun->dev); + +			if (!(tun->flags & TUN_PERSIST) && +			    tun->dev->reg_state == NETREG_REGISTERED)  				unregister_netdevice(tun->dev); +		}  		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,  				 &tfile->socket.flags)); @@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)  		rcu_assign_pointer(tfile->tun, NULL);  		--tun->numqueues;  	} +	list_for_each_entry(tfile, &tun->disabled, next) { +		wake_up_all(&tfile->wq.wait); +		rcu_assign_pointer(tfile->tun, NULL); +	}  	BUG_ON(tun->numqueues != 0);  	synchronize_net(); @@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)  		goto out;  	err = -EINVAL; -	if (rtnl_dereference(tfile->tun)) +	if (rtnl_dereference(tfile->tun) && !tfile->detached)  		goto out;  	err = -EBUSY; @@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,  	tun->dev->stats.rx_packets++;  	tun->dev->stats.rx_bytes += len; -	tun_flow_update(tun, rxhash, tfile->queue_index); +	tun_flow_update(tun, rxhash, tfile);  	return total_len;  } @@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||  		    device_create_file(&tun->dev->dev, &dev_attr_group))  			pr_err("Failed to create tun sysfs files\n"); - -		netif_carrier_on(tun->dev);  	} +	netif_carrier_on(tun->dev); +  	tun_debug(KERN_INFO, tun, "tun_set_iff\n");  	if (ifr->ifr_flags & IFF_NO_PI) @@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)  		ret = 
tun_attach(tun, file);  	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {  		tun = rtnl_dereference(tfile->tun); -		if (!tun || !(tun->flags & TUN_TAP_MQ)) +		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)  			ret = -EINVAL;  		else  			__tun_detach(tfile, false); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9197b2c72ca..00d3b2d3782 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {  	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),  	  .driver_info = (unsigned long)&wwan_info,  	}, +	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), +	  .driver_info = (unsigned long)&wwan_info, +	},  	/* Infineon(now Intel) HSPA Modem platform */  	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 575a5839ee3..c8e05e27f38 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {  		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	}, +	{	/* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	},  	/* 2. Combined interface devices matching on class+protocol */  	{	/* Huawei E367 and possibly others in "Windows mode" */ @@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {  		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	}, +	{	/* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	}, +	{	/* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	},  	{	/* Pantech UML290, P4200 and more */  		USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),  		.driver_info        = (unsigned long)&qmi_wwan_info, @@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {  	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */  	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */  	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */ +	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */  	/* 4. 
Gobi 1000 devices */  	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f34b2ebee81..5e33606c136 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)  	unsigned long		lockflags;  	size_t			size = dev->rx_urb_size; +	/* prevent rx skb allocation when error ratio is high */ +	if (test_bit(EVENT_RX_KILL, &dev->flags)) { +		usb_free_urb(urb); +		return -ENOLINK; +	} +  	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);  	if (!skb) {  		netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); @@ -539,6 +545,17 @@ block:  		break;  	} +	/* stop rx if packet error rate is high */ +	if (++dev->pkt_cnt > 30) { +		dev->pkt_cnt = 0; +		dev->pkt_err = 0; +	} else { +		if (state == rx_cleanup) +			dev->pkt_err++; +		if (dev->pkt_err > 20) +			set_bit(EVENT_RX_KILL, &dev->flags); +	} +  	state = defer_bh(dev, skb, &dev->rxq, state);  	if (urb) { @@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)  		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :  		   "simple"); +	/* reset rx error state */ +	dev->pkt_cnt = 0; +	dev->pkt_err = 0; +	clear_bit(EVENT_RX_KILL, &dev->flags); +  	// delay posting reads until we're fully open  	tasklet_schedule (&dev->bh);  	if (info->manage_power) { @@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,  	if (info->tx_fixup) {  		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);  		if (!skb) { -			if (netif_msg_tx_err(dev)) { -				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); -				goto drop; -			} else { -				/* cdc_ncm collected packet; waits for more */ +			/* packet collected; minidriver waiting for more */ +			if (info->flags & FLAG_MULTI_PACKET)  				goto not_drop; -			} +			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); +			goto drop;  		}  	}  	length = skb->len; @@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)  		}  	} +	/* restart RX again after disabling due to high error rate */ +	clear_bit(EVENT_RX_KILL, &dev->flags); +  	// waiting for all pending urbs to complete?  	if (dev->wait) {  		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index dc8913c6238..12c6440d164 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)  	if (ret & 1) { /* Link is up. 
*/  		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",  		       adapter->netdev->name, adapter->link_speed); -		if (!netif_carrier_ok(adapter->netdev)) -			netif_carrier_on(adapter->netdev); +		netif_carrier_on(adapter->netdev);  		if (affectTxQueue) {  			for (i = 0; i < adapter->num_tx_queues; i++) @@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)  	} else {  		printk(KERN_INFO "%s: NIC Link is Down\n",  		       adapter->netdev->name); -		if (netif_carrier_ok(adapter->netdev)) -			netif_carrier_off(adapter->netdev); +		netif_carrier_off(adapter->netdev);  		if (affectTxQueue) {  			for (i = 0; i < adapter->num_tx_queues; i++) @@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,  	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);  	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); +	netif_carrier_off(netdev);  	err = register_netdev(netdev);  	if (err) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 0f71d1d4339..e5fd20994be 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -36,6 +36,7 @@  #include "debug.h"  #define N_TX_QUEUES	4 /* #tx queues on mac80211<->driver interface */ +#define BRCMS_FLUSH_TIMEOUT	500 /* msec */  /* Flags we support */  #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ @@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)  	wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);  } +static bool brcms_tx_flush_completed(struct brcms_info *wl) +{ +	bool result; + +	spin_lock_bh(&wl->lock); +	result = brcms_c_tx_flush_completed(wl->wlc); +	spin_unlock_bh(&wl->lock); +	return result; +} +  static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)  {  	struct brcms_info *wl = hw->priv; +	int ret;  	no_printk("%s: drop = %s\n", __func__, drop ? 
"true" : "false"); -	/* wait for packet queue and dma fifos to run empty */ -	spin_lock_bh(&wl->lock); -	brcms_c_wait_for_tx_completion(wl->wlc, drop); -	spin_unlock_bh(&wl->lock); +	ret = wait_event_timeout(wl->tx_flush_wq, +				 brcms_tx_flush_completed(wl), +				 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); + +	brcms_dbg_mac80211(wl->wlc->hw->d11core, +			   "ret=%d\n", jiffies_to_msecs(ret));  }  static const struct ieee80211_ops brcms_ops = { @@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)   done:  	spin_unlock_bh(&wl->lock); +	wake_up(&wl->tx_flush_wq);  }  /* @@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)  	atomic_set(&wl->callbacks, 0); +	init_waitqueue_head(&wl->tx_flush_wq); +  	/* setup the bottom half handler */  	tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); @@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)  	spin_lock_bh(&wl->lock);  	return blocked;  } - -/* - * precondition: perimeter lock has been acquired - */ -void brcms_msleep(struct brcms_info *wl, uint ms) -{ -	spin_unlock_bh(&wl->lock); -	msleep(ms); -	spin_lock_bh(&wl->lock); -} diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 9358bd5ebd3..947ccacf43e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h @@ -68,6 +68,8 @@ struct brcms_info {  	spinlock_t lock;	/* per-device perimeter lock */  	spinlock_t isr_lock;	/* per-device ISR synchronization lock */ +	/* tx flush */ +	wait_queue_head_t tx_flush_wq;  	/* timer related fields */  	atomic_t callbacks;	/* # outstanding callback functions */ @@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,  extern void brcms_free_timer(struct brcms_timer *timer);  extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);  extern bool brcms_del_timer(struct brcms_timer *timer); -extern void brcms_msleep(struct brcms_info *wl, uint ms);  extern void brcms_dpc(unsigned long data);  extern void brcms_timer(struct brcms_timer *t);  extern void brcms_fatal_error(struct brcms_info *wl); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 17594de4199..8b5839008af 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)  static bool  brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  { -	bool morepending = false;  	struct bcma_device *core;  	struct tx_status txstatus, *txs;  	u32 s1, s2; @@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  	txs = &txstatus;  	core = wlc_hw->d11core;  	*fatal = false; -	s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); -	while (!(*fatal) -	       && (s1 & TXS_V)) { -		/* !give others some time to run! 
*/ -		if (n >= max_tx_num) { -			morepending = true; -			break; -		} +	while (n < max_tx_num) { +		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));  		if (s1 == 0xffffffff) {  			brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,  				  __func__);  			*fatal = true;  			return false;  		} -		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); +		/* only process when valid */ +		if (!(s1 & TXS_V)) +			break; +		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));  		txs->status = s1 & TXS_STATUS_MASK;  		txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;  		txs->sequence = s2 & TXS_SEQ_MASK; @@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  		txs->lasttxtime = 0;  		*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); - -		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); +		if (*fatal == true) +			return false;  		n++;  	} -	if (*fatal) -		return false; - -	return morepending; +	return n >= max_tx_num;  }  static void brcms_c_tbtt(struct brcms_c_info *wlc) @@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)  	return wlc->band->bandunit;  } -void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) +bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)  { -	int timeout = 20;  	int i;  	/* Kick DMA to send any pending AMPDU */  	for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)  		if (wlc->hw->di[i]) -			dma_txflush(wlc->hw->di[i]); - -	/* wait for queue and DMA fifos to run dry */ -	while (brcms_txpktpendtot(wlc) > 0) { -		brcms_msleep(wlc->wl, 1); - -		if (--timeout == 0) -			break; -	} +			dma_kick_tx(wlc->hw->di[i]); -	WARN_ON_ONCE(timeout == 0); +	return !brcms_txpktpendtot(wlc);  }  void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h index 4fb2834f4e6..b0f14b7b861 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h @@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);  extern void brcms_c_scan_start(struct brcms_c_info *wlc);  extern void brcms_c_scan_stop(struct brcms_c_info *wlc);  extern int brcms_c_get_curband(struct brcms_c_info *wlc); -extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, -					   bool drop);  extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);  extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);  extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, @@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);  extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);  extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);  extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); +extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);  #endif				/* _BRCM_PUB_H_ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 31534f7c054..279796419ea 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,  			next_reclaimed = ssn;  		} +		if (tid != IWL_TID_NON_QOS) { +			priv->tid_data[sta_id][tid].next_reclaimed = +				next_reclaimed; +			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", +						  next_reclaimed); +		} +  		iwl_trans_reclaim(priv->trans, 
txq_id, ssn, &skbs);  		iwlagn_check_ratid_empty(priv, sta_id, tid); @@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,  			if (!is_agg)  				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); -			/* -			 * W/A for FW bug - the seq_ctl isn't updated when the -			 * queues are flushed. Fetch it from the packet itself -			 */ -			if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { -				next_reclaimed = le16_to_cpu(hdr->seq_ctrl); -				next_reclaimed = -					SEQ_TO_SN(next_reclaimed + 0x10); -			} -  			is_offchannel_skb =  				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);  			freed++;  		} -		if (tid != IWL_TID_NON_QOS) { -			priv->tid_data[sta_id][tid].next_reclaimed = -				next_reclaimed; -			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", -					   next_reclaimed); -		} -  		WARN_ON(!is_agg && freed != 1);  		/* diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9189a32b784..973a9d90e9e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",  			scan_rsp->number_of_sets);  		ret = -1; -		goto done; +		goto check_next_scan;  	}  	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); @@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		if (!beacon_size || beacon_size > bytes_left) {  			bss_info += bytes_left;  			bytes_left = 0; -			return -1; +			ret = -1; +			goto check_next_scan;  		}  		/* Initialize the current working beacon pointer for this BSS @@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  				dev_err(priv->adapter->dev,  					"%s: bytes left < IE length\n",  					__func__); -				goto done; +				goto check_next_scan;  			}  			if (element_id == WLAN_EID_DS_PARAMS) {  				channel = *(current_ptr + sizeof(struct ieee_types_header)); @@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		}  	} +check_next_scan:  	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);  	if (list_empty(&adapter->scan_pending_q)) {  		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		}  	} -done:  	return ret;  } diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 4494d130b37..0f8b05185ed 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  					 is_tx ? "Tx" : "Rx");  				if (is_tx) { -					rtl_lps_leave(hw); +					schedule_work(&rtlpriv-> +						      works.lps_leave_work);  					ppsc->last_delaylps_stamp_jiffies =  					    jiffies;  				} @@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  		}  	} else if (ETH_P_ARP == ether_type) {  		if (is_tx) { -			rtl_lps_leave(hw); +			schedule_work(&rtlpriv->works.lps_leave_work);  			ppsc->last_delaylps_stamp_jiffies = jiffies;  		} @@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  			 "802.1X %s EAPOL pkt!!\n", is_tx ? 
"Tx" : "Rx");  		if (is_tx) { -			rtl_lps_leave(hw); +			schedule_work(&rtlpriv->works.lps_leave_work);  			ppsc->last_delaylps_stamp_jiffies = jiffies;  		} diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f2ecdeb3a90..1535efda3d5 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)  	WARN_ON(skb_queue_empty(&rx_queue));  	while (!skb_queue_empty(&rx_queue)) {  		_skb = skb_dequeue(&rx_queue); -		_rtl_usb_rx_process_agg(hw, skb); -		ieee80211_rx_irqsafe(hw, skb); +		_rtl_usb_rx_process_agg(hw, _skb); +		ieee80211_rx_irqsafe(hw, _skb);  	}  } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 94b79c3338c..9d7f1723dd8 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);  /* Notify xenvif that ring now has space to send an skb to the frontend */  void xenvif_notify_tx_completion(struct xenvif *vif); +/* Prevent the device from generating any further traffic. */ +void xenvif_carrier_off(struct xenvif *vif); +  /* Returns number of ring slots required to send an skb to the frontend */  unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b7d41f8c338..b8c5193bd42 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -343,17 +343,22 @@ err:  	return err;  } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_carrier_off(struct xenvif *vif)  {  	struct net_device *dev = vif->dev; -	if (netif_carrier_ok(dev)) { -		rtnl_lock(); -		netif_carrier_off(dev); /* discard queued packets */ -		if (netif_running(dev)) -			xenvif_down(vif); -		rtnl_unlock(); -		xenvif_put(vif); -	} + +	rtnl_lock(); +	netif_carrier_off(dev); /* discard queued packets */ +	if (netif_running(dev)) +		xenvif_down(vif); +	rtnl_unlock(); +	xenvif_put(vif); +} + +void xenvif_disconnect(struct xenvif *vif) +{ +	if (netif_carrier_ok(vif->dev)) +		xenvif_carrier_off(vif);  	atomic_dec(&vif->refcnt);  	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f2d6b78d901..2b9520c46e9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)  	atomic_dec(&netbk->netfront_count);  } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +				  u8 status);  static void make_tx_response(struct xenvif *vif,  			     struct xen_netif_tx_request *txp,  			     s8       st); @@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,  	do {  		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		if (cons >= end) +		if (cons == end)  			break;  		txp = RING_GET_REQUEST(&vif->tx, cons++);  	} while (1); @@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,  	xenvif_put(vif);  } +static void netbk_fatal_tx_err(struct xenvif *vif) +{ +	netdev_err(vif->dev, "fatal error; disabling device\n"); +	xenvif_carrier_off(vif); +	xenvif_put(vif); +} +  static int netbk_count_requests(struct xenvif *vif,  				struct xen_netif_tx_request *first,  				struct 
xen_netif_tx_request *txp, @@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,  	do {  		if (frags >= work_to_do) { -			netdev_dbg(vif->dev, "Need more frags\n"); +			netdev_err(vif->dev, "Need more frags\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  		if (unlikely(frags >= MAX_SKB_FRAGS)) { -			netdev_dbg(vif->dev, "Too many frags\n"); +			netdev_err(vif->dev, "Too many frags\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),  		       sizeof(*txp));  		if (txp->size > first->size) { -			netdev_dbg(vif->dev, "Frags galore\n"); +			netdev_err(vif->dev, "Frag is bigger than frame.\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		} @@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,  		frags++;  		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { -			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", +			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",  				 txp->offset, txp->size); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  	} while ((txp++)->flags & XEN_NETTXF_more_data); @@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,  		pending_idx = netbk->pending_ring[index];  		page = xen_netbk_alloc_page(netbk, skb, pending_idx);  		if (!page) -			return NULL; +			goto err;  		gop->source.u.ref = txp->gref;  		gop->source.domid = vif->domid; @@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,  	}  	return gop; +err: +	/* Unwind, freeing all pages and sending error responses. */ +	while (i-- > start) { +		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), +				      XEN_NETIF_RSP_ERROR); +	} +	/* The head too, if necessary. */ +	if (start) +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + +	return NULL;  }  static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, @@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  {  	struct gnttab_copy *gop = *gopp;  	u16 pending_idx = *((u16 *)skb->data); -	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; -	struct xenvif *vif = pending_tx_info[pending_idx].vif; -	struct xen_netif_tx_request *txp;  	struct skb_shared_info *shinfo = skb_shinfo(skb);  	int nr_frags = shinfo->nr_frags;  	int i, err, start;  	/* Check status of header. */  	err = gop->status; -	if (unlikely(err)) { -		pending_ring_idx_t index; -		index = pending_index(netbk->pending_prod++); -		txp = &pending_tx_info[pending_idx].req; -		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		netbk->pending_ring[index] = pending_idx; -		xenvif_put(vif); -	} +	if (unlikely(err)) +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);  	/* Skip first skb fragment if it is on same page as header fragment. */  	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);  	for (i = start; i < nr_frags; i++) {  		int j, newerr; -		pending_ring_idx_t index;  		pending_idx = frag_get_pending_idx(&shinfo->frags[i]); @@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  		if (likely(!newerr)) {  			/* Had a previous error? Invalidate this fragment. */  			if (unlikely(err)) -				xen_netbk_idx_release(netbk, pending_idx); +				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  			continue;  		}  		/* Error on this fragment: respond to client with an error. 
*/ -		txp = &netbk->pending_tx_info[pending_idx].req; -		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		index = pending_index(netbk->pending_prod++); -		netbk->pending_ring[index] = pending_idx; -		xenvif_put(vif); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);  		/* Not the first error? Preceding frags already invalidated. */  		if (err) @@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  		/* First error: invalidate header and preceding fragments. */  		pending_idx = *((u16 *)skb->data); -		xen_netbk_idx_release(netbk, pending_idx); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		for (j = start; j < i; j++) {  			pending_idx = frag_get_pending_idx(&shinfo->frags[j]); -			xen_netbk_idx_release(netbk, pending_idx); +			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		}  		/* Remember the error: invalidate all subsequent fragments. */ @@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)  		/* Take an extra reference to offset xen_netbk_idx_release */  		get_page(netbk->mmap_pages[pending_idx]); -		xen_netbk_idx_release(netbk, pending_idx); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  	}  } @@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,  	do {  		if (unlikely(work_to_do-- <= 0)) { -			netdev_dbg(vif->dev, "Missing extra info\n"); +			netdev_err(vif->dev, "Missing extra info\n"); +			netbk_fatal_tx_err(vif);  			return -EBADR;  		} @@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,  		if (unlikely(!extra.type ||  			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {  			vif->tx.req_cons = ++cons; -			netdev_dbg(vif->dev, +			netdev_err(vif->dev,  				   "Invalid extra type: %d\n", extra.type); +			netbk_fatal_tx_err(vif);  			return -EINVAL;  		} @@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,  			     struct xen_netif_extra_info *gso)  {  	if (!gso->u.gso.size) { -		netdev_dbg(vif->dev, "GSO size must not be zero.\n"); +		netdev_err(vif->dev, "GSO size must not be zero.\n"); +		netbk_fatal_tx_err(vif);  		return -EINVAL;  	}  	/* Currently only TCPv4 S.O. is supported. */  	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { -		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); +		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); +		netbk_fatal_tx_err(vif);  		return -EINVAL;  	} @@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  		/* Get a netif from the list with work to do. */  		vif = poll_net_schedule_list(netbk); +		/* This can sometimes happen because the test of +		 * list_empty(net_schedule_list) at the top of the +		 * loop is unlocked.  Just go back and have another +		 * look. +		 */  		if (!vif)  			continue; +		if (vif->tx.sring->req_prod - vif->tx.req_cons > +		    XEN_NETIF_TX_RING_SIZE) { +			netdev_err(vif->dev, +				   "Impossible number of requests. 
" +				   "req_prod %d, req_cons %d, size %ld\n", +				   vif->tx.sring->req_prod, vif->tx.req_cons, +				   XEN_NETIF_TX_RING_SIZE); +			netbk_fatal_tx_err(vif); +			continue; +		} +  		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);  		if (!work_to_do) {  			xenvif_put(vif); @@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  			work_to_do = xen_netbk_get_extras(vif, extras,  							  work_to_do);  			idx = vif->tx.req_cons; -			if (unlikely(work_to_do < 0)) { -				netbk_tx_err(vif, &txreq, idx); +			if (unlikely(work_to_do < 0))  				continue; -			}  		}  		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); -		if (unlikely(ret < 0)) { -			netbk_tx_err(vif, &txreq, idx - ret); +		if (unlikely(ret < 0))  			continue; -		} +  		idx += ret;  		if (unlikely(txreq.size < ETH_HLEN)) { @@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  		/* No crossing a page as the payload mustn't fragment. */  		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { -			netdev_dbg(vif->dev, +			netdev_err(vif->dev,  				   "txreq.offset: %x, size: %u, end: %lu\n",  				   txreq.offset, txreq.size,  				   (txreq.offset&~PAGE_MASK) + txreq.size); -			netbk_tx_err(vif, &txreq, idx); +			netbk_fatal_tx_err(vif);  			continue;  		} @@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];  			if (netbk_set_skb_gso(vif, skb, gso)) { +				/* Failure in netbk_set_skb_gso is fatal. */  				kfree_skb(skb); -				netbk_tx_err(vif, &txreq, idx);  				continue;  			}  		} @@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)  			txp->size -= data_len;  		} else {  			/* Schedule a response immediately. 
*/ -			xen_netbk_idx_release(netbk, pending_idx); +			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		}  		if (txp->flags & XEN_NETTXF_csum_blank) @@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)  	xen_netbk_tx_submit(netbk);  } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +				  u8 status)  {  	struct xenvif *vif;  	struct pending_tx_info *pending_tx_info; @@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)  	vif = pending_tx_info->vif; -	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); +	make_tx_response(vif, &pending_tx_info->req, status);  	index = pending_index(netbk->pending_prod++);  	netbk->pending_ring[index] = pending_idx; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index efaecefe3f8..a5f3c8ca480 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -184,8 +184,8 @@ config PINCTRL_SAMSUNG  	select PINMUX  	select PINCONF -config PINCTRL_EXYNOS4 -	bool "Pinctrl driver data for Exynos4 SoC" +config PINCTRL_EXYNOS +	bool "Pinctrl driver data for Samsung EXYNOS SoCs"  	depends on OF && GPIOLIB  	select PINCTRL_SAMSUNG diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index fc4606f27dc..6e87e52eab5 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30)	+= pinctrl-tegra30.o  obj-$(CONFIG_PINCTRL_U300)	+= pinctrl-u300.o  obj-$(CONFIG_PINCTRL_COH901)	+= pinctrl-coh901.o  obj-$(CONFIG_PINCTRL_SAMSUNG)	+= pinctrl-samsung.o -obj-$(CONFIG_PINCTRL_EXYNOS4)	+= pinctrl-exynos.o +obj-$(CONFIG_PINCTRL_EXYNOS)	+= pinctrl-exynos.o  obj-$(CONFIG_PINCTRL_EXYNOS5440)	+= pinctrl-exynos5440.o  obj-$(CONFIG_PINCTRL_XWAY)	+= pinctrl-xway.o  obj-$(CONFIG_PINCTRL_LANTIQ)	+= pinctrl-lantiq.o diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 498b2ba905d..d02498b30c6 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)  	return of_iomap(np, 0);  } +static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, +       const struct of_phandle_args *gpiospec, +       u32 *flags) +{ +       if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) +               return -EINVAL; + +       if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) +               return -EINVAL; + +       if (flags) +               *flags = gpiospec->args[1]; + +       return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; +} +  static int sirfsoc_pinmux_probe(struct platform_device *pdev)  {  	int ret; @@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)  		bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;  		bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);  		bank->chip.gc.of_node = np; +		bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; +		bank->chip.gc.of_gpio_n_cells = 2;  		bank->chip.regs = regs;  		bank->id = i;  		bank->is_marco = is_marco; diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index b85040caaea..cca18a3c029 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c @@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = {  };  #ifdef CONFIG_OF -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct 
platform_device *pdev,  					struct max77686_platform_data *pdata)  { +	struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);  	struct device_node *pmic_np, *regulators_np;  	struct max77686_regulator_data *rdata;  	struct of_regulator_match rmatch; @@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  	pmic_np = iodev->dev->of_node;  	regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");  	if (!regulators_np) { -		dev_err(iodev->dev, "could not find regulators sub-node\n"); +		dev_err(&pdev->dev, "could not find regulators sub-node\n");  		return -EINVAL;  	}  	pdata->num_regulators = ARRAY_SIZE(regulators); -	rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * +	rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *  			     pdata->num_regulators, GFP_KERNEL);  	if (!rdata) { -		dev_err(iodev->dev, +		dev_err(&pdev->dev,  			"could not allocate memory for regulator data\n");  		return -ENOMEM;  	} @@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  		rmatch.name = regulators[i].name;  		rmatch.init_data = NULL;  		rmatch.of_node = NULL; -		of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); +		of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);  		rdata[i].initdata = rmatch.init_data;  		rdata[i].of_node = rmatch.of_node;  	} @@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  	return 0;  }  #else -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max77686_platform_data *pdata)  {  	return 0; @@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)  	}  	if (iodev->dev->of_node) { -		ret = max77686_pmic_dt_parse_pdata(iodev, pdata); +		ret = max77686_pmic_dt_parse_pdata(pdev, pdata);  		if (ret)  			return ret;  	} diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index d1a77512d83..d40cf7fdb54 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)  		return -EINVAL;  	} -	ret = of_regulator_match(pdev->dev.parent, regulators, -				 max8907_matches, +	ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,  				 ARRAY_SIZE(max8907_matches));  	if (ret < 0) {  		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 02be7fcae32..836908ce505 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = {  };  #ifdef CONFIG_OF -static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,  			struct max8997_platform_data *pdata,  			struct device_node *pmic_np)  { @@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,  		gpio = of_get_named_gpio(pmic_np,  					"max8997,pmic-buck125-dvs-gpios", i);  		if (!gpio_is_valid(gpio)) { -			dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); +			dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);  			return -EINVAL;  		}  		pdata->buck125_gpios[i] = gpio; @@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,  	return 0;  } -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max8997_platform_data *pdata)  { +	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);  	struct device_node *pmic_np, *regulators_np, *reg_np;  	struct max8997_regulator_data *rdata;  	unsigned int i, dvs_voltage_nr = 1, ret;  	pmic_np = iodev->dev->of_node;  	if (!pmic_np) { -		dev_err(iodev->dev, "could not find pmic sub-node\n"); +		dev_err(&pdev->dev, "could not find pmic sub-node\n");  		return -ENODEV;  	}  	regulators_np = of_find_node_by_name(pmic_np, "regulators");  	if (!regulators_np) { -		dev_err(iodev->dev, "could not find regulators sub-node\n"); +		dev_err(&pdev->dev, "could not find regulators sub-node\n");  		return -EINVAL;  	} @@ -976,11 +977,10 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	for_each_child_of_node(regulators_np, reg_np)  		pdata->num_regulators++; -	rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * +	rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *  				pdata->num_regulators, GFP_KERNEL);  	if (!rdata) { -		dev_err(iodev->dev, "could not allocate memory for " -						"regulator data\n"); +		dev_err(&pdev->dev, "could not allocate memory for regulator data\n");  		return -ENOMEM;  	} @@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  				break;  		if (i == ARRAY_SIZE(regulators)) { -			dev_warn(iodev->dev, "don't know how to configure " -				"regulator %s\n", reg_np->name); +			dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", +				 reg_np->name);  			continue;  		}  		rdata->id = i; -		rdata->initdata = of_get_regulator_init_data( -						iodev->dev, reg_np); +		rdata->initdata = of_get_regulator_init_data(&pdev->dev, +							     reg_np);  		rdata->reg_node = reg_np;  		rdata++;  	} @@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||  						pdata->buck5_gpiodvs) { -		ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); +		ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);  		if (ret)  			return -EINVAL; @@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  		} else {  			if (pdata->buck125_default_idx >= 8) {  				pdata->buck125_default_idx = 0; -				dev_info(iodev->dev, "invalid value for " -				"default dvs index, using 0 instead\n"); +				dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");  			}  		} @@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck1-dvs-voltage",  				pdata->buck1_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck1 voltages not specified\n"); +		dev_err(&pdev->dev, "buck1 voltages not specified\n");  		return -EINVAL;  	}  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck2-dvs-voltage",  				pdata->buck2_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck2 voltages not specified\n"); +		dev_err(&pdev->dev, "buck2 voltages not specified\n");  		return -EINVAL;  	}  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck5-dvs-voltage",  				pdata->buck5_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck5 voltages not specified\n"); +		dev_err(&pdev->dev, "buck5 voltages not specified\n");  		return -EINVAL;  	}  	return 0;  }  #else -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_pdata(struct 
platform_device *pdev,  					struct max8997_platform_data *pdata)  {  	return 0; @@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)  	}  	if (iodev->dev->of_node) { -		ret = max8997_pmic_dt_parse_pdata(iodev, pdata); +		ret = max8997_pmic_dt_parse_pdata(pdev, pdata);  		if (ret)  			return ret;  	} diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 1f0df4046b8..0a8dd1cbee6 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c @@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {  	.min = 2800000,	.step = 100000,	.max = 3100000,  };  static const struct voltage_map_desc ldo10_voltage_map_desc = { -	.min = 95000,	.step = 50000,	.max = 1300000, +	.min = 950000,	.step = 50000,	.max = 1300000,  };  static const struct voltage_map_desc ldo1213_voltage_map_desc = {  	.min = 800000,	.step = 100000,	.max = 3300000, diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 6f684916fd7..66ca769287a 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,  	if (!dev || !node)  		return -EINVAL; +	for (i = 0; i < num_matches; i++) { +		struct of_regulator_match *match = &matches[i]; +		match->init_data = NULL; +		match->of_node = NULL; +	} +  	for_each_child_of_node(node, child) {  		name = of_get_property(child,  					"regulator-compatible", NULL); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index bd062a2ffbe..cd9ea2ea182 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {  	.min_uV		= S2MPS11_BUCK_MIN2,			\  	.uV_step	= S2MPS11_BUCK_STEP2,			\  	.n_voltages	= S2MPS11_BUCK_N_VOLTAGES,		\ -	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\ +	.vsel_reg	= S2MPS11_REG_B10CTRL2,			\  	.vsel_mask	= S2MPS11_BUCK_VSEL_MASK,		\ -	.enable_reg	= S2MPS11_REG_B9CTRL1,			\ +	.enable_reg	= S2MPS11_REG_B10CTRL1,			\  	.enable_mask	= S2MPS11_ENABLE_MASK			\  } diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index 73dce766412..df395187c06 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c @@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)  	if (!regs)  		return NULL; -	count = of_regulator_match(pdev->dev.parent, regs, -				reg_matches, TPS65217_NUM_REGULATOR); +	count = of_regulator_match(&pdev->dev, regs, reg_matches, +				   TPS65217_NUM_REGULATOR);  	of_node_put(regs);  	if ((count < 0) || (count > TPS65217_NUM_REGULATOR))  		return NULL; diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 59c3770fa77..b0e4c0bc85c 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(  		return NULL;  	} -	ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); +	ret = of_regulator_match(&pdev->dev, regulators, matches, count);  	if (ret < 0) {  		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",  			ret); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index afb7cfa85cc..c016ad81767 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)  {  	unsigned long 
timeout = jiffies + msecs_to_jiffies(1000);  	struct i2c_client *client = data; +	struct rtc_device *rtc = i2c_get_clientdata(client);  	int handled = 0, sr, err;  	/* @@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)  	if (sr & ISL1208_REG_SR_ALM) {  		dev_dbg(&client->dev, "alarm!\n"); +		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); +  		/* Clear the alarm */  		sr &= ~ISL1208_REG_SR_ALM;  		sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 08378e3cc21..10c1a3454e4 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -44,6 +44,7 @@  #define RTC_YMR		0x34	/* Year match register */  #define RTC_YLR		0x38	/* Year data load register */ +#define RTC_CR_EN	(1 << 0)	/* counter enable bit */  #define RTC_CR_CWEN	(1 << 26)	/* Clockwatch enable bit */  #define RTC_TCR_EN	(1 << 1) /* Periodic timer enable bit */ @@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)  	struct pl031_local *ldata;  	struct pl031_vendor_data *vendor = id->data;  	struct rtc_class_ops *ops = &vendor->ops; -	unsigned long time; +	unsigned long time, data;  	ret = amba_request_regions(adev, NULL);  	if (ret) @@ -345,10 +346,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)  	dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));  	dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); +	data = readl(ldata->base + RTC_CR);  	/* Enable the clockwatch on ST Variants */  	if (vendor->clockwatch) -		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, -		       ldata->base + RTC_CR); +		data |= RTC_CR_CWEN; +	writel(data | RTC_CR_EN, ldata->base + RTC_CR);  	/*  	 * On ST PL031 variants, the RTC reset value does not provide correct diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index 00c930f4b6f..2730533e2d2 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)  		return -EINVAL;  	} -	writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) +	writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)  		| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)  		| (bin2bcd(tm->tm_mday))  		| ((tm->tm_year >= 200) << DATE_CENTURY_S), diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 97ac0a38e3d..eb2753008ef 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)  	return -1;  } + +int ssb_gpio_unregister(struct ssb_bus *bus) +{ +	if (ssb_chipco_available(&bus->chipco) || +	    ssb_extif_available(&bus->extif)) { +		return gpiochip_remove(&bus->gpio); +	} else { +		SSB_WARN_ON(1); +	} + +	return -1; +} diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 772ad9b5c30..24dc331b470 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)  void ssb_bus_unregister(struct ssb_bus *bus)  { +	int err; + +	err = ssb_gpio_unregister(bus); +	if (err == -EBUSY) +		ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); +	else if (err) +		ssb_dprintk(KERN_ERR PFX +			    "Can not unregister GPIO driver: %i\n", err); +  	ssb_buses_lock();  	ssb_devices_unregister(bus);  	list_del(&bus->list); diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 6c10b66c796..da38305a2d2 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -252,11 +252,16 
@@ static inline void ssb_extif_init(struct ssb_extif *extif)  #ifdef CONFIG_SSB_DRIVER_GPIO  extern int ssb_gpio_init(struct ssb_bus *bus); +extern int ssb_gpio_unregister(struct ssb_bus *bus);  #else /* CONFIG_SSB_DRIVER_GPIO */  static inline int ssb_gpio_init(struct ssb_bus *bus)  {  	return -ENOTSUPP;  } +static inline int ssb_gpio_unregister(struct ssb_bus *bus) +{ +	return 0; +}  #endif /* CONFIG_SSB_DRIVER_GPIO */  #endif /* LINUX_SSB_PRIVATE_H_ */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e2695101bb9..f2aa7543d20 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)  int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)  { +	int block_size = dev->dev_attrib.block_size; +  	if (dev->export_count) {  		pr_err("dev[%p]: Unable to change SE Device"  			" fabric_max_sectors while export_count is %d\n", @@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)  	/*  	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()  	 */ +	if (!block_size) { +		block_size = 512; +		pr_warn("Defaulting to 512 for zero block_size\n"); +	}  	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, -						      dev->dev_attrib.block_size); +						      block_size);  	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;  	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 810263dfa4a..c57bbbc7a7d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -754,6 +754,11 @@ static int target_fabric_port_link(  		return -EFAULT;  	} +	if (!(dev->dev_flags & DF_CONFIGURED)) { +		pr_err("se_device not configured yet, cannot port link\n"); +		return -ENODEV; +	} +  	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;  	se_tpg = container_of(to_config_group(tpg_ci),  				struct se_portal_group, tpg_group); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 26a6d183ccb..a664c664a31 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)  	buf[7] = dev->dev_attrib.block_size & 0xff;  	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	target_complete_cmd(cmd, GOOD);  	return 0; @@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)  		buf[14] = 0x80;  	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	target_complete_cmd(cmd, GOOD);  	return 0; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 84f9e96e8ac..2d88f087d96 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)  out: 
 	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	if (!ret)  		target_complete_cmd(cmd, GOOD); @@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev;  	char *cdb = cmd->t_task_cdb; -	unsigned char *buf, *map_buf; +	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;  	int type = dev->transport->get_device_type(dev);  	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);  	bool dbd = !!(cdb[1] & 0x08); @@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  	int ret;  	int i; -	map_buf = transport_kmap_data_sg(cmd); -	if (!map_buf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -	/* -	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we -	 * know we actually allocated a full page.  Otherwise, if the -	 * data buffer is too small, allocate a temporary buffer so we -	 * don't have to worry about overruns in all our INQUIRY -	 * emulation handling. -	 */ -	if (cmd->data_length < SE_MODE_PAGE_BUF && -	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { -		buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); -		if (!buf) { -			transport_kunmap_data_sg(cmd); -			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -		} -	} else { -		buf = map_buf; -	} +	memset(buf, 0, SE_MODE_PAGE_BUF); +  	/*  	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for  	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). @@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  	if (page == 0x3f) {  		if (subpage != 0x00 && subpage != 0xff) {  			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); -			kfree(buf); -			transport_kunmap_data_sg(cmd);  			return TCM_INVALID_CDB_FIELD;  		} @@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",  		       page, subpage); -	transport_kunmap_data_sg(cmd);  	return TCM_UNKNOWN_MODE_PAGE;  set_length: @@ -981,12 +959,12 @@ set_length:  	else  		buf[0] = length - 1; -	if (buf != map_buf) { -		memcpy(map_buf, buf, cmd->data_length); -		kfree(buf); +	rbuf = transport_kmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); +		transport_kunmap_data_sg(cmd);  	} -	transport_kunmap_data_sg(cmd);  	target_complete_cmd(cmd, GOOD);  	return 0;  } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 4225d5e7213..8e64adf8e4d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -39,6 +39,7 @@  #include <asm/unaligned.h>  #include <linux/platform_device.h>  #include <linux/workqueue.h> +#include <linux/pm_runtime.h>  #include <linux/usb.h>  #include <linux/usb/hcd.h> @@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd)  	return retval;  } +/* + * usb_hcd_start_port_resume - a root-hub port is sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal is + * being sent to a root-hub port.  The root hub will be prevented from + * going into autosuspend until usb_hcd_end_port_resume() is called. + * + * The bus's private lock must be held by the caller. 
+ */ +void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) +{ +	unsigned bit = 1 << portnum; + +	if (!(bus->resuming_ports & bit)) { +		bus->resuming_ports |= bit; +		pm_runtime_get_noresume(&bus->root_hub->dev); +	} +} +EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); + +/* + * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal has + * stopped being sent to a root-hub port.  The root hub will be allowed to + * autosuspend again. + * + * The bus's private lock must be held by the caller. + */ +void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) +{ +	unsigned bit = 1 << portnum; + +	if (bus->resuming_ports & bit) { +		bus->resuming_ports &= ~bit; +		pm_runtime_put_noidle(&bus->root_hub->dev); +	} +} +EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);  /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 957ed2c4148..cbf7168e3ce 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev)  EXPORT_SYMBOL_GPL(usb_enable_ltm);  #ifdef	CONFIG_USB_SUSPEND +/* + * usb_disable_function_remotewakeup - disable usb3.0 + * device's function remote wakeup + * @udev: target device + * + * Assume there's only one function on the USB 3.0 + * device and disable remote wake for the first + * interface. FIXME if the interface association + * descriptor shows there's more than one function. + */ +static int usb_disable_function_remotewakeup(struct usb_device *udev) +{ +	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), +				USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, +				USB_INTRF_FUNC_SUSPEND,	0, NULL, 0, +				USB_CTRL_SET_TIMEOUT); +}  /*   * usb_port_suspend - suspend a usb device's upstream port @@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)  		dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",  				port1, status);  		/* paranoia:  "should not happen" */ -		if (udev->do_remote_wakeup) -			(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), -				USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, -				USB_DEVICE_REMOTE_WAKEUP, 0, -				NULL, 0, -				USB_CTRL_SET_TIMEOUT); +		if (udev->do_remote_wakeup) { +			if (!hub_is_superspeed(hub->hdev)) { +				(void) usb_control_msg(udev, +						usb_sndctrlpipe(udev, 0), +						USB_REQ_CLEAR_FEATURE, +						USB_RECIP_DEVICE, +						USB_DEVICE_REMOTE_WAKEUP, 0, +						NULL, 0, +						USB_CTRL_SET_TIMEOUT); +			} else +				(void) usb_disable_function_remotewakeup(udev); + +		}  		/* Try to enable USB2 hardware LPM again */  		if (udev->usb2_hw_lpm_capable == 1) @@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev)  	 * udev->reset_resume  	 */  	} else if (udev->actconfig && !udev->reset_resume) { -		le16_to_cpus(&devstatus); -		if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { -			status = usb_control_msg(udev, -					usb_sndctrlpipe(udev, 0), -					USB_REQ_CLEAR_FEATURE, +		if (!hub_is_superspeed(udev->parent)) { +			le16_to_cpus(&devstatus); +			if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) +				status = usb_control_msg(udev, +						usb_sndctrlpipe(udev, 0), +						USB_REQ_CLEAR_FEATURE,  						USB_RECIP_DEVICE, -					USB_DEVICE_REMOTE_WAKEUP, 0, -					NULL, 0, -					USB_CTRL_SET_TIMEOUT); -			if (status) -				
dev_dbg(&udev->dev, -					"disable remote wakeup, status %d\n", -					status); +						USB_DEVICE_REMOTE_WAKEUP, 0, +						NULL, 0, +						USB_CTRL_SET_TIMEOUT); +		} else { +			status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, +					&devstatus); +			le16_to_cpus(&devstatus); +			if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP +					| USB_INTRF_STAT_FUNC_RW)) +				status = +					usb_disable_function_remotewakeup(udev);  		} + +		if (status) +			dev_dbg(&udev->dev, +				"disable remote wakeup, status %d\n", +				status);  		status = 0;  	}  	return status; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 09537b2f100..b416a3fc995 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -797,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)  			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);  			set_bit(i, &ehci->resuming_ports);  			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); +			usb_hcd_start_port_resume(&hcd->self, i);  			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);  		}  	} diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4ccb97c0678..4d3b294f203 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)  			status = STS_PCD;  		}  	} -	/* FIXME autosuspend idle root hubs */ + +	/* If a resume is in progress, make sure it can finish */ +	if (ehci->resuming_ports) +		mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); +  	spin_unlock_irqrestore (&ehci->lock, flags);  	return status ? retval : 0;  } @@ -851,6 +855,7 @@ static int ehci_hub_control (  				/* resume signaling for 20 msec */  				ehci->reset_done[wIndex] = jiffies  						+ msecs_to_jiffies(20); +				usb_hcd_start_port_resume(&hcd->self, wIndex);  				/* check the port again */  				mod_timer(&ehci_to_hcd(ehci)->rh_timer,  						ehci->reset_done[wIndex]); @@ -862,6 +867,7 @@ static int ehci_hub_control (  				clear_bit(wIndex, &ehci->suspended_ports);  				set_bit(wIndex, &ehci->port_c_suspend);  				ehci->reset_done[wIndex] = 0; +				usb_hcd_end_port_resume(&hcd->self, wIndex);  				/* stop resume signaling */  				temp = ehci_readl(ehci, status_reg); @@ -950,6 +956,7 @@ static int ehci_hub_control (  			ehci->reset_done[wIndex] = 0;  			if (temp & PORT_PE)  				set_bit(wIndex, &ehci->port_c_suspend); +			usb_hcd_end_port_resume(&hcd->self, wIndex);  		}  		if (temp & PORT_OC) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3d989028c83..fd252f0cfb3 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)  	if (ehci->async_iaa || ehci->async_unlinking)  		return; -	/* Do all the waiting QHs at once */ -	ehci->async_iaa = ehci->async_unlink; -	ehci->async_unlink = NULL; -  	/* If the controller isn't running, we don't have to wait for it */  	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { + +		/* Do all the waiting QHs */ +		ehci->async_iaa = ehci->async_unlink; +		ehci->async_unlink = NULL; +  		if (!nested)		/* Avoid recursion */  			end_unlink_async(ehci);  	/* Otherwise start a new IAA cycle */  	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { +		struct ehci_qh		*qh; + +		/* Do only the first waiting QH (nVidia bug?) 
*/ +		qh = ehci->async_unlink; +		ehci->async_iaa = qh; +		ehci->async_unlink = qh->unlink_next; +		qh->unlink_next = NULL; +  		/* Make sure the unlinks are all visible to the hardware */  		wmb(); @@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)  	}  } +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); +  static void unlink_empty_async(struct ehci_hcd *ehci)  { -	struct ehci_qh		*qh, *next; -	bool			stopped = (ehci->rh_state < EHCI_RH_RUNNING); +	struct ehci_qh		*qh; +	struct ehci_qh		*qh_to_unlink = NULL;  	bool			check_unlinks_later = false; +	int			count = 0; -	/* Unlink all the async QHs that have been empty for a timer cycle */ -	next = ehci->async->qh_next.qh; -	while (next) { -		qh = next; -		next = qh->qh_next.qh; - +	/* Find the last async QH which has been empty for a timer cycle */ +	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {  		if (list_empty(&qh->qtd_list) &&  				qh->qh_state == QH_STATE_LINKED) { -			if (!stopped && qh->unlink_cycle == -					ehci->async_unlink_cycle) +			++count; +			if (qh->unlink_cycle == ehci->async_unlink_cycle)  				check_unlinks_later = true;  			else -				single_unlink_async(ehci, qh); +				qh_to_unlink = qh;  		}  	} -	/* Start a new IAA cycle if any QHs are waiting for it */ -	if (ehci->async_unlink) -		start_iaa_cycle(ehci, false); +	/* If nothing else is being unlinked, unlink the last empty QH */ +	if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { +		start_unlink_async(ehci, qh_to_unlink); +		--count; +	} -	/* QHs that haven't been empty for long enough will be handled later */ -	if (check_unlinks_later) { +	/* Other QHs will be handled later */ +	if (count > 0) {  		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);  		++ehci->async_unlink_cycle;  	} diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 69ebee73c0c..b476daf49f6 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)  }  static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };  /* carryover low/fullspeed bandwidth that crosses uframe boundries */  static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) @@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)  	}  	ehci->now_frame = now_frame; +	frame = ehci->last_iso_frame;  	for (;;) {  		union ehci_shadow	q, *q_p;  		__hc32			type, *hw_p; -		frame = ehci->last_iso_frame;  restart:  		/* scan each element in frame's queue for completions */  		q_p = &ehci->pshadow [frame]; @@ -2321,6 +2321,9 @@ restart:  		/* Stop when we have reached the current frame */  		if (frame == now_frame)  			break; -		ehci->last_iso_frame = (frame + 1) & fmask; + +		/* The last frame may still have active siTDs */ +		ehci->last_iso_frame = frame; +		frame = (frame + 1) & fmask;  	}  } diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0..f904071d70d 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)  	if (want != actual) { -		/* Poll again later, but give up after about 20 ms */ -		if (ehci->ASS_poll_count++ < 20) { -			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); -			return; -		} -		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), 
giving up\n", -				want, actual); +		/* Poll again later */ +		ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); +		++ehci->ASS_poll_count; +		return;  	} + +	if (ehci->ASS_poll_count > 20) +		ehci_dbg(ehci, "ASS poll count reached %d\n", +				ehci->ASS_poll_count);  	ehci->ASS_poll_count = 0;  	/* The status is up-to-date; restart or stop the schedule as needed */ @@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)  	if (want != actual) { -		/* Poll again later, but give up after about 20 ms */ -		if (ehci->PSS_poll_count++ < 20) { -			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); -			return; -		} -		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", -				want, actual); +		/* Poll again later */ +		ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); +		return;  	} + +	if (ehci->PSS_poll_count > 20) +		ehci_dbg(ehci, "PSS poll count reached %d\n", +				ehci->PSS_poll_count);  	ehci->PSS_poll_count = 0;  	/* The status is up-to-date; restart or stop the schedule as needed */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a3b6d7104ae..4c338ec03a0 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)  				"defaulting to EHCI.\n");  		dev_warn(&xhci_pdev->dev,  				"USB 3.0 devices will work at USB 2.0 speeds.\n"); +		usb_disable_xhci_ports(xhci_pdev);  		return;  	} diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 768d54295a2..15d13229ddb 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,  		}  	}  	clear_bit(port, &uhci->resuming_ports); +	usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);  }  /* Wait for the UHCI controller in HP's iLO2 server management chip. @@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)  				set_bit(port, &uhci->resuming_ports);  				uhci->ports_timeout = jiffies +  						msecs_to_jiffies(25); +				usb_hcd_start_port_resume( +						&uhci_to_hcd(uhci)->self, port);  				/* Make sure we see the port again  				 * after the resuming period is over. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 59fb5c677db..7f76a49e90d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,  				faked_port_index + 1);  		if (slot_id && xhci->devs[slot_id])  			xhci_ring_device(xhci, slot_id); -		if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { +		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {  			bus_state->port_remote_wakeup &=  				~(1 << faked_port_index);  			xhci_test_and_clear_bit(xhci, port_array, @@ -2589,6 +2589,8 @@ cleanup:  				(trb_comp_code != COMP_STALL &&  					trb_comp_code != COMP_BABBLE))  				xhci_urb_free_priv(xhci, urb_priv); +			else +				kfree(urb_priv);  			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);  			if ((urb->actual_length != urb->transfer_buffer_length && @@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,  	 * running_total.  	 
*/  	packets_transferred = (running_total + trb_buff_len) / -		usb_endpoint_maxp(&urb->ep->desc); +		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));  	if ((total_packet_count - packets_transferred) > 31)  		return 31 << 17; @@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		td_len = urb->iso_frame_desc[i].length;  		td_remain_len = td_len;  		total_packet_count = DIV_ROUND_UP(td_len, -				usb_endpoint_maxp(&urb->ep->desc)); +				GET_MAX_PACKET( +					usb_endpoint_maxp(&urb->ep->desc)));  		/* A zero-length transfer still involves at least one packet. */  		if (total_packet_count == 0)  			total_packet_count++; @@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		td = urb_priv->td[i];  		for (j = 0; j < trbs_per_td; j++) {  			u32 remainder = 0; -			field = TRB_TBC(burst_count) | TRB_TLBPC(residue); +			field = 0;  			if (first_trb) { +				field = TRB_TBC(burst_count) | +					TRB_TLBPC(residue);  				/* Queue the isoc TRB */  				field |= TRB_TYPE(TRB_ISOC);  				/* Assume URB_ISO_ASAP is set */ diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f14736f647f..edc0f0dcad8 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {  	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */  	{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */  	{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ +	{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */  	{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */  	{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */  	{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ba68835d06a..90ceef1776c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {  	/*  	 * ELV devices:  	 */ +	{ USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, +	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },  	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index fa5d5603827..9d359e189a6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -147,6 +147,11 @@  #define XSENS_CONVERTER_6_PID	0xD38E  #define XSENS_CONVERTER_7_PID	0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509			0xD491	/* Omni1509 embedded USB-serial */ +  /*   * NDI (www.ndigital.com) product ids   */ @@ -204,7 +209,7 @@  /*   * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403).   * Further IDs taken from ELV Windows .inf file.   *   * The previously included PID for the UO 100 module was incorrect. 
@@ -212,6 +217,8 @@   *   * Armin Laeuger originally sent the PID for the UM 100 module.   */ +#define FTDI_ELV_VID	0x1B1F	/* ELV AG */ +#define FTDI_ELV_WS300_PID	0xC006	/* eQ3 WS 300 PC II */  #define FTDI_ELV_USR_PID	0xE000	/* ELV Universal-Sound-Recorder */  #define FTDI_ELV_MSM1_PID	0xE001	/* ELV Mini-Sound-Modul */  #define FTDI_ELV_KL100_PID	0xE002	/* ELV Kfz-Leistungsmesser KL 100 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0d9dac9e7f9..567bc77d639 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);  #define TELIT_PRODUCT_CC864_DUAL		0x1005  #define TELIT_PRODUCT_CC864_SINGLE		0x1006  #define TELIT_PRODUCT_DE910_DUAL		0x1010 +#define TELIT_PRODUCT_LE920			0x1200  /* ZTE PRODUCTS */  #define ZTE_VENDOR_ID				0x19d2 @@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);  #define TPLINK_VENDOR_ID			0x2357  #define TPLINK_PRODUCT_MA180			0x0201 +/* Changhong products */ +#define CHANGHONG_VENDOR_ID			0x2077 +#define CHANGHONG_PRODUCT_CH690			0x7001 +  /* some devices interfaces need special handling due to a number of reasons */  enum option_blacklist_reason {  		OPTION_BLACKLIST_NONE = 0, @@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {  	.reserved = BIT(3) | BIT(4),  }; +static const struct option_blacklist_info telit_le920_blacklist = { +	.sendsetup = BIT(0), +	.reserved = BIT(1) | BIT(5), +}; +  static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, +	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),  		.driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },  	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),  	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },  	{ } /* Terminating entry */  };  MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index aa148c21ea4..24662547dc5 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {  	{DEVICE_G1K(0x05c6, 0x9221)},	/* Generic Gobi QDL device */  	{DEVICE_G1K(0x05c6, 0x9231)},	/* Generic Gobi QDL device */  	{DEVICE_G1K(0x1f45, 0x0001)},	/* Unknown Gobi QDL device */ +	{DEVICE_G1K(0x1bc7, 0x900e)},	/* Telit Gobi QDL device */  	/* Gobi 2000 devices */  	{USB_DEVICE(0x1410, 0xa010)},	/* Novatel Gobi 2000 QDL device */ diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d900150c..16b0bf055ee 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)  	return 
0;  } -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us) +/* This places the HUAWEI usb dongles in multi-port mode */ +static int usb_stor_huawei_feature_init(struct us_data *us)  {  	int result; @@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)  	US_DEBUGP("Huawei mode set result is %d\n", result);  	return 0;  } + +/* + * This sends a SCSI switch command called 'rewind' to the Huawei dongle. + * The first time the dongle receives this command, it reboots immediately. + * After rebooting, it ignores the command, so it is unnecessary + * to read its response. + */ +static int usb_stor_huawei_scsi_init(struct us_data *us) +{ +	int result = 0; +	int act_len = 0; +	struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; +	char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, +			0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +	bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); +	bcbw->Tag = 0; +	bcbw->DataTransferLength = 0; +	bcbw->Flags = bcbw->Lun = 0; +	bcbw->Length = sizeof(rewind_cmd); +	memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); +	memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); + +	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, +					US_BULK_CB_WRAP_LEN, &act_len); +	US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); +	return result; +} + +/* + * This checks whether the device is a supported Huawei USB dongle. + * Huawei assigns the following product IDs to all of its + * mobile broadband dongles, including new dongles + * released in the future. + * So if the product ID is not included in this list, + * the device is not a Huawei mobile broadband dongle. + */ +static int usb_stor_huawei_dongles_pid(struct us_data *us) +{ +	struct usb_interface_descriptor *idesc; +	int idProduct; + +	idesc = &us->pusb_intf->cur_altsetting->desc; +	idProduct = us->pusb_dev->descriptor.idProduct; +	/* The first port is CDROM, +	 * which means the dongle is in single-port mode, +	 * and a switch command is required to be sent. 
*/ +	if (idesc && idesc->bInterfaceNumber == 0) { +		if ((idProduct == 0x1001) +			|| (idProduct == 0x1003) +			|| (idProduct == 0x1004) +			|| (idProduct >= 0x1401 && idProduct <= 0x1500) +			|| (idProduct >= 0x1505 && idProduct <= 0x1600) +			|| (idProduct >= 0x1c02 && idProduct <= 0x2202)) { +			return 1; +		} +	} +	return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ +	int result = 0; + +	if (usb_stor_huawei_dongles_pid(us)) { +		if (us->pusb_dev->descriptor.idProduct >= 0x1446) +			result = usb_stor_huawei_scsi_init(us); +		else +			result = usb_stor_huawei_feature_init(us); +	} +	return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fbb06..5376d4fc76f 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);   * flash reader */  int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d305a5aa3a5..72923b56bbf 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1527,335 +1527,10 @@ UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,  /* Reported by fangxiaozhi <huananhu@huawei.com>   * This brings the HUAWEI data card devices into multi-port mode   */ -UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,  		"HUAWEI MOBILE",  		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, 
usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), 
-UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, +		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,  		0),  /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 31b3e1a61bb..cf09b6ba71f 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");  	.useTransport = use_transport,	\  } +#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ +		vendor_name, product_name, use_protocol, use_transport, \ +		init_function, Flags) \ +{ \ +	.vendorName = vendor_name,	\ +	.productName = product_name,	\ +	.useProtocol = use_protocol,	\ +	.useTransport = use_transport,	\ +	.initFunction = init_function,	\ +} +  static struct us_unusual_dev us_unusual_dev_list[] = {  #	include "unusual_devs.h"  	{ }		/* Terminating entry */ @@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =  #undef UNUSUAL_DEV  #undef COMPLIANT_DEV  #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF  #ifdef CONFIG_LOCKDEP diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b78a526910f..5ef8ce74aae 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -41,6 +41,20 @@  #define USUAL_DEV(useProto, useTrans) \  { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } +/* Define the device is matched with Vendor ID and interface descriptors */ +#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ +			vendorName, productName, useProtocol, useTransport, \ +			initFunction, flags) \ +{ \ +	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ +				| USB_DEVICE_ID_MATCH_VENDOR, \ +	.idVendor    = (id_vendor), \ +	.bInterfaceClass = (cl), \ +	.bInterfaceSubClass = (sc), \ +	.bInterfaceProtocol = (pr), \ +	.driver_info = (flags) \ +} +  struct usb_device_id usb_storage_usb_ids[] = {  #	include "unusual_devs.h"  	{ }		/* Terminating entry */ @@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);  #undef UNUSUAL_DEV  #undef COMPLIANT_DEV  #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF  /*   * The table of devices to ignore diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ebd08b21b23..959b1cd89e6 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)  }  /* Caller must have TX VQ lock */ -static void tx_poll_start(struct vhost_net *net, struct socket *sock) +static int 
tx_poll_start(struct vhost_net *net, struct socket *sock)  { +	int ret; +  	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) -		return; -	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); -	net->tx_poll_state = VHOST_NET_POLL_STARTED; +		return 0; +	ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); +	if (!ret) +		net->tx_poll_state = VHOST_NET_POLL_STARTED; +	return ret;  }  /* In case of DMA done not in order in lower device driver for some reason. @@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,  		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);  } -static void vhost_net_enable_vq(struct vhost_net *n, +static int vhost_net_enable_vq(struct vhost_net *n,  				struct vhost_virtqueue *vq)  {  	struct socket *sock; +	int ret;  	sock = rcu_dereference_protected(vq->private_data,  					 lockdep_is_held(&vq->mutex));  	if (!sock) -		return; +		return 0;  	if (vq == n->vqs + VHOST_NET_VQ_TX) {  		n->tx_poll_state = VHOST_NET_POLL_STOPPED; -		tx_poll_start(n, sock); +		ret = tx_poll_start(n, sock);  	} else -		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); +		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); + +	return ret;  }  static struct socket *vhost_net_stop_vq(struct vhost_net *n, @@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)  			r = PTR_ERR(ubufs);  			goto err_ubufs;  		} -		oldubufs = vq->ubufs; -		vq->ubufs = ubufs; +  		vhost_net_disable_vq(n, vq);  		rcu_assign_pointer(vq->private_data, sock); -		vhost_net_enable_vq(n, vq); -  		r = vhost_init_used(vq);  		if (r) -			goto err_vq; +			goto err_used; +		r = vhost_net_enable_vq(n, vq); +		if (r) +			goto err_used; + +		oldubufs = vq->ubufs; +		vq->ubufs = ubufs;  		n->tx_packets = 0;  		n->tx_zcopy_err = 0; @@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)  	mutex_unlock(&n->dev.mutex);  	return 0; +err_used: +	rcu_assign_pointer(vq->private_data, oldsock); +	vhost_net_enable_vq(n, vq); +	if (ubufs) +		vhost_ubuf_put_and_wait(ubufs);  err_ubufs:  	fput(sock->file);  err_vq: diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index b20df5c829f..22321cf84fb 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)  	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */  	tv_tpg = vs->vs_tpg; -	if (unlikely(!tv_tpg)) { -		pr_err("%s endpoint not set\n", __func__); +	if (unlikely(!tv_tpg))  		return; -	}  	mutex_lock(&vq->mutex);  	vhost_disable_notify(&vs->dev, vq); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 34389f75fe6..9759249e6d9 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,  	init_poll_funcptr(&poll->table, vhost_poll_func);  	poll->mask = mask;  	poll->dev = dev; +	poll->wqh = NULL;  	vhost_work_init(&poll->work, fn);  }  /* Start polling a file. We add ourselves to file's wait queue. The caller must   * keep a reference to a file until after vhost_poll_stop is called. 
*/ -void vhost_poll_start(struct vhost_poll *poll, struct file *file) +int vhost_poll_start(struct vhost_poll *poll, struct file *file)  {  	unsigned long mask; +	int ret = 0;  	mask = file->f_op->poll(file, &poll->table);  	if (mask)  		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); +	if (mask & POLLERR) { +		if (poll->wqh) +			remove_wait_queue(poll->wqh, &poll->wait); +		ret = -EINVAL; +	} + +	return ret;  }  /* Stop polling a file. After this function returns, it becomes safe to drop the   * file reference. You must also flush afterwards. */  void vhost_poll_stop(struct vhost_poll *poll)  { -	remove_wait_queue(poll->wqh, &poll->wait); +	if (poll->wqh) { +		remove_wait_queue(poll->wqh, &poll->wait); +		poll->wqh = NULL; +	}  }  static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, @@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)  		fput(filep);  	if (pollstart && vq->handle_kick) -		vhost_poll_start(&vq->poll, vq->kick); +		r = vhost_poll_start(&vq->poll, vq->kick);  	mutex_unlock(&vq->mutex); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 2639c58b23a..17261e277c0 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);  void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,  		     unsigned long mask, struct vhost_dev *dev); -void vhost_poll_start(struct vhost_poll *poll, struct file *file); +int vhost_poll_start(struct vhost_poll *poll, struct file *file);  void vhost_poll_stop(struct vhost_poll *poll);  void vhost_poll_flush(struct vhost_poll *poll);  void vhost_poll_queue(struct vhost_poll *poll); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0be4df39e95..74d77dfa5f6 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  	if (irq == -1) {  		irq = xen_allocate_irq_dynamic(); -		if (irq == -1) +		if (irq < 0)  			goto out;  		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, @@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  	if (irq == -1) {  		irq = xen_allocate_irq_dynamic(); -		if (irq == -1) +		if (irq < 0)  			goto out;  		irq_set_chip_and_handler_name(irq, &xen_percpu_chip, diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 97f5d264c31..37c1f825f51 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,  			 struct pci_dev *dev, struct xen_pci_op *op)  {  	struct xen_pcibk_dev_data *dev_data; -	int otherend = pdev->xdev->otherend_id;  	int status;  	if (unlikely(verbose_request)) @@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,  	status = pci_enable_msi(dev);  	if (status) { -		printk(KERN_ERR "error enable msi for guest %x status %x\n", -			otherend, status); +		pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", +				    pci_name(dev), pdev->xdev->otherend_id, +				    status);  		op->value = 0;  		return XEN_PCI_ERR_op_failed;  	} @@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,  						pci_name(dev), i,  						op->msix_entries[i].vector);  		} -	} else { -		printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", -			pci_name(dev), result); -	} +	} else +		pr_warn_ratelimited(DRV_NAME ": %s: error 
enabling MSI-X for guest %u: err %d!\n", +				    pci_name(dev), pdev->xdev->otherend_id, +				    result);  	kfree(entries);  	op->value = result; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a8b8adc0507..5a3327b8f90 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)  	unsigned nr_extents = 0;  	int extra_reserve = 0;  	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; -	int ret; +	int ret = 0;  	bool delalloc_lock = true;  	/* If we are a free space inode we need to not flush since we will be in @@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)  	csum_bytes = BTRFS_I(inode)->csum_bytes;  	spin_unlock(&BTRFS_I(inode)->lock); -	if (root->fs_info->quota_enabled) { +	if (root->fs_info->quota_enabled)  		ret = btrfs_qgroup_reserve(root, num_bytes +  					   nr_extents * root->leafsize); -		if (ret) { -			spin_lock(&BTRFS_I(inode)->lock); -			calc_csum_metadata_size(inode, num_bytes, 0); -			spin_unlock(&BTRFS_I(inode)->lock); -			if (delalloc_lock) -				mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); -			return ret; -		} -	} -	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); +	/* +	 * ret != 0 here means the qgroup reservation failed, we go straight to +	 * the shared error handling then. +	 */ +	if (ret == 0) +		ret = reserve_metadata_bytes(root, block_rsv, +					     to_reserve, flush); +  	if (ret) {  		u64 to_free = 0;  		unsigned dropped; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 2e8cae63d24..fdb7a8db3b5 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -288,7 +288,8 @@ out:  void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)  {  	clear_bit(EXTENT_FLAG_LOGGING, &em->flags); -	try_merge_map(tree, em); +	if (em->in_tree) +		try_merge_map(tree, em);  }  /** diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f76b1fd160d..aeb84469d2c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	struct btrfs_key key;  	struct btrfs_ioctl_defrag_range_args range;  	int num_defrag; +	int index; +	int ret;  	/* get the inode */  	key.objectid = defrag->root;  	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);  	key.offset = (u64)-1; + +	index = srcu_read_lock(&fs_info->subvol_srcu); +  	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);  	if (IS_ERR(inode_root)) { -		kmem_cache_free(btrfs_inode_defrag_cachep, defrag); -		return PTR_ERR(inode_root); +		ret = PTR_ERR(inode_root); +		goto cleanup; +	} +	if (btrfs_root_refs(&inode_root->root_item) == 0) { +		ret = -ENOENT; +		goto cleanup;  	}  	key.objectid = defrag->ino; @@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	key.offset = 0;  	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);  	if (IS_ERR(inode)) { -		kmem_cache_free(btrfs_inode_defrag_cachep, defrag); -		return PTR_ERR(inode); +		ret = PTR_ERR(inode); +		goto cleanup;  	} +	srcu_read_unlock(&fs_info->subvol_srcu, index);  	/* do a chunk of defrag */  	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); @@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	iput(inode);  	return 0; +cleanup: +	srcu_read_unlock(&fs_info->subvol_srcu, index); +	kmem_cache_free(btrfs_inode_defrag_cachep, defrag); +	return ret;  }  /* @@ -1594,9 +1608,10 @@ static 
ssize_t btrfs_file_aio_write(struct kiocb *iocb,  		if (err < 0 && num_written > 0)  			num_written = err;  	} -out: +  	if (sync)  		atomic_dec(&BTRFS_I(inode)->sync_writers); +out:  	sb_end_write(inode->i_sb);  	current->backing_dev_info = NULL;  	return num_written ? num_written : err; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5b22d45d3c6..338f2597bf7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root,  	BUG_ON(ret); -	d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));  fail:  	if (async_transid) {  		*async_transid = trans->transid; @@ -525,6 +524,10 @@ fail:  	}  	if (err && !ret)  		ret = err; + +	if (!ret) +		d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); +  	return ret;  } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index f1073129704..e5ed5672960 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,  	 * if the disk i_size is already at the inode->i_size, or  	 * this ordered extent is inside the disk i_size, we're done  	 */ -	if (disk_i_size == i_size || offset <= disk_i_size) { +	if (disk_i_size == i_size) +		goto out; + +	/* +	 * We still need to update disk_i_size if outstanding_isize is greater +	 * than disk_i_size. +	 */ +	if (offset <= disk_i_size && +	    (!ordered || ordered->outstanding_isize <= disk_i_size))  		goto out; -	}  	/*  	 * walk backward from this ordered extent to disk_i_size. @@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,  			break;  		if (test->file_offset >= i_size)  			break; -		if (test->file_offset >= disk_i_size) { +		if (entry_end(test) > disk_i_size) {  			/*  			 * we don't update disk_i_size now, so record this  			 * undealt i_size. Or we will not know the real diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bdbb94f245c..67783e03d12 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)  	int corrected = 0;  	struct btrfs_key key;  	struct inode *inode = NULL; +	struct btrfs_fs_info *fs_info;  	u64 end = offset + PAGE_SIZE - 1;  	struct btrfs_root *local_root; +	int srcu_index;  	key.objectid = root;  	key.type = BTRFS_ROOT_ITEM_KEY;  	key.offset = (u64)-1; -	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); -	if (IS_ERR(local_root)) + +	fs_info = fixup->root->fs_info; +	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); + +	local_root = btrfs_read_fs_root_no_name(fs_info, &key); +	if (IS_ERR(local_root)) { +		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  		return PTR_ERR(local_root); +	}  	key.type = BTRFS_INODE_ITEM_KEY;  	key.objectid = inum;  	key.offset = 0; -	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); +	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); +	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  	if (IS_ERR(inode))  		return PTR_ERR(inode); @@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)  	}  	if (PageUptodate(page)) { -		struct btrfs_fs_info *fs_info;  		if (PageDirty(page)) {  			/*  			 * we need to write the data to the defect sector. 
the @@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)  	u64 physical_for_dev_replace;  	u64 len;  	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; +	int srcu_index;  	key.objectid = root;  	key.type = BTRFS_ROOT_ITEM_KEY;  	key.offset = (u64)-1; + +	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); +  	local_root = btrfs_read_fs_root_no_name(fs_info, &key); -	if (IS_ERR(local_root)) +	if (IS_ERR(local_root)) { +		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  		return PTR_ERR(local_root); +	}  	key.type = BTRFS_INODE_ITEM_KEY;  	key.objectid = inum;  	key.offset = 0;  	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); +	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  	if (IS_ERR(inode))  		return PTR_ERR(inode); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f15494699f3..fc03aa60b68 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,  					  &root->fs_info->trans_block_rsv,  					  num_bytes, flush);  		if (ret) -			return ERR_PTR(ret); +			goto reserve_fail;  	}  again:  	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); -	if (!h) -		return ERR_PTR(-ENOMEM); +	if (!h) { +		ret = -ENOMEM; +		goto alloc_fail; +	}  	/*  	 * If we are JOIN_NOLOCK we're already committing a transaction and @@ -365,11 +367,7 @@ again:  	if (ret < 0) {  		/* We must get the transaction if we are JOIN_NOLOCK. */  		BUG_ON(type == TRANS_JOIN_NOLOCK); - -		if (type < TRANS_JOIN_NOLOCK) -			sb_end_intwrite(root->fs_info->sb); -		kmem_cache_free(btrfs_trans_handle_cachep, h); -		return ERR_PTR(ret); +		goto join_fail;  	}  	cur_trans = root->fs_info->running_transaction; @@ -410,6 +408,19 @@ got_it:  	if (!current->journal_info && type != TRANS_USERSPACE)  		current->journal_info = h;  	return h; + +join_fail: +	if (type < TRANS_JOIN_NOLOCK) +		sb_end_intwrite(root->fs_info->sb); +	kmem_cache_free(btrfs_trans_handle_cachep, h); +alloc_fail: +	if (num_bytes) +		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, +					num_bytes); +reserve_fail: +	if (qgroup_reserved) +		btrfs_qgroup_free(root, qgroup_reserved); +	return ERR_PTR(ret);  }  struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15f6efdf646..5cbb7f4b167 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)  	ret = 0;  	/* Notify udev that device has changed */ -	btrfs_kobject_uevent(bdev, KOBJ_CHANGE); +	if (bdev) +		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);  error_brelse:  	brelse(bh); diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 7ff49852b0c..911649a47dd 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf,  #endif  		return -EINVAL; -#ifdef CONFIG_COMPAT -	if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) -#else +	/* +	 * can't compare against COMPAT/dlm_write_request32 because +	 * we don't yet know if is64bit is zero +	 */  	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) -#endif  		return -EINVAL;  	kbuf = kzalloc(count + 1, GFP_NOFS); diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fdb18076948..f3859354e41 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -664,8 +664,11 @@ static int 
nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,  	if (ret < 0)  		printk(KERN_ERR "NILFS: GC failed during preparation: "  			"cannot read source blocks: err=%d\n", ret); -	else +	else { +		if (nilfs_sb_need_update(nilfs)) +			set_nilfs_discontinued(nilfs);  		ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); +	}  	nilfs_remove_all_gcinodes(nilfs);  	clear_nilfs_gc_running(nilfs); diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e8..d0ab98f73d3 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list)  	     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))  /** + * llist_for_each_entry_safe - iterate safely against removal over some entries + * of lock-less list of given type. + * @pos:	the type * to use as a loop cursor. + * @n:		another type * to use as temporary storage. + * @node:	the first entry of deleted list entries. + * @member:	the name of the llist_node within the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. This variant allows removal of entries + * as we iterate. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry.  If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry_safe(pos, n, node, member)		\ +	for ((pos) = llist_entry((node), typeof(*(pos)), member),	\ +	     (n) = (pos)->member.next;					\ +	     &(pos)->member != NULL;					\ +	     (pos) = llist_entry(n, typeof(*(pos)), member),		\ +	     (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL) + +/**   * llist_empty - tests whether a lock-less list is empty   * @head:	the list to test   * diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0108a56f814..28bd5fa2ff2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size;   * the slab_mutex must be held when looping through those caches   */  #define for_each_memcg_cache_index(_idx)	\ -	for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) +	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)  static inline bool memcg_kmem_enabled(void)  { diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index bc823c4c028..deca8745252 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -151,7 +151,7 @@ struct mmu_notifier_ops {   * Therefore notifier chains can only be traversed when either   *   * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).   * 3. No other concurrent thread can access the list (release)   */  struct mmu_notifier { diff --git a/include/linux/usb.h b/include/linux/usb.h index 689b14b26c8..4d22d0f6167 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -357,6 +357,8 @@ struct usb_bus {  	int bandwidth_int_reqs;		/* number of Interrupt requests */  	int bandwidth_isoc_reqs;	/* number of Isoc. 
requests */ +	unsigned resuming_ports;	/* bit array: resuming root-hub ports */ +  #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)  	struct mon_bus *mon_bus;	/* non-null when associated */  	int monitored;			/* non-zero when monitored */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 608050b2545..0a78df5f6cf 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);  extern void usb_wakeup_notification(struct usb_device *hdev,  		unsigned int portnum); +extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); +extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); +  /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */  #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)  #define	usb_dotoggle(dev, ep, out)  ((dev)->toggle[out] ^= (1 << (ep))) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 5de7a220e98..0e5ac93bab1 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -33,6 +33,7 @@ struct usbnet {  	wait_queue_head_t	*wait;  	struct mutex		phy_mutex;  	unsigned char		suspend_count; +	unsigned char		pkt_cnt, pkt_err;  	/* i/o info: pipes etc */  	unsigned		in, out; @@ -70,6 +71,7 @@ struct usbnet {  #		define EVENT_DEV_OPEN	7  #		define EVENT_DEVICE_REPORT_IDLE	8  #		define EVENT_NO_RUNTIME_PM	9 +#		define EVENT_RX_KILL	10  };  static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -100,7 +102,6 @@ struct driver_info {  #define FLAG_LINK_INTR	0x0800		/* updates link (carrier) status */  #define FLAG_POINTTOPOINT 0x1000	/* possibly use "usb%d" names */ -#define FLAG_NOARP	0x2000		/* device can't do ARP */  /*   * Indicates to usbnet, that USB driver accumulates multiple IP packets. @@ -108,6 +109,7 @@ struct driver_info {   */  #define FLAG_MULTI_PACKET	0x2000  #define FLAG_RX_ASSEMBLE	0x4000	/* rx packets may span >1 frames */ +#define FLAG_NOARP		0x8000	/* device can't do ARP */  	/* init device ... 
can sleep, or cause probe() failure */  	int	(*bind)(struct usbnet *, struct usb_interface *); diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 498433dd067..938b7fd1120 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -34,17 +34,17 @@ extern int				udpv6_connect(struct sock *sk,  						      struct sockaddr *uaddr,  						      int addr_len); -extern int			datagram_recv_ctl(struct sock *sk, -						  struct msghdr *msg, -						  struct sk_buff *skb); +extern int			ip6_datagram_recv_ctl(struct sock *sk, +						      struct msghdr *msg, +						      struct sk_buff *skb); -extern int			datagram_send_ctl(struct net *net, -						  struct sock *sk, -						  struct msghdr *msg, -						  struct flowi6 *fl6, -						  struct ipv6_txoptions *opt, -						  int *hlimit, int *tclass, -						  int *dontfrag); +extern int			ip6_datagram_send_ctl(struct net *net, +						      struct sock *sk, +						      struct msghdr *msg, +						      struct flowi6 *fl6, +						      struct ipv6_txoptions *opt, +						      int *hlimit, int *tclass, +						      int *dontfrag);  #define		LOOPBACK4_IPV6		cpu_to_be32(0x7f000006) diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 50598472dc4..f738e25377f 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -152,6 +152,12 @@  #define USB_INTRF_FUNC_SUSPEND_LP	(1 << (8 + 0))  #define USB_INTRF_FUNC_SUSPEND_RW	(1 << (8 + 1)) +/* + * Interface status, Figure 9-5 USB 3.0 spec + */ +#define USB_INTRF_STAT_FUNC_RW_CAP     1 +#define USB_INTRF_STAT_FUNC_RW         2 +  #define USB_ENDPOINT_HALT		0	/* IN/OUT will STALL */  /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 301079d06f2..7b6646a8c06 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)  }  /* + * Initialize event state based on the perf_event_attr::disabled. + */ +static inline void perf_event__state_init(struct perf_event *event) +{ +	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : +					      PERF_EVENT_STATE_INACTIVE; +} + +/*   * Called at perf_event creation and when events are attached/detached from a   * group.   */ @@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,  	event->overflow_handler	= overflow_handler;  	event->overflow_handler_context = context; -	if (attr->disabled) -		event->state = PERF_EVENT_STATE_OFF; +	perf_event__state_init(event);  	pmu = NULL; @@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,  		mutex_lock(&gctx->mutex);  		perf_remove_from_context(group_leader); + +		/* +		 * Removing from the context ends up with disabled +		 * event. What we want here is event in the initial +		 * startup state, ready to be add into new context. +		 */ +		perf_event__state_init(group_leader);  		list_for_each_entry(sibling, &group_leader->sibling_list,  				    group_entry) {  			perf_remove_from_context(sibling); +			perf_event__state_init(sibling);  			put_ctx(gctx);  		}  		mutex_unlock(&gctx->mutex); diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index f6e5ec2932b..c1cc7e17ff9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -40,8 +40,7 @@  #ifdef CONFIG_RCU_NOCB_CPU  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */  static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? 
*/ -static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */ -module_param(rcu_nocb_poll, bool, 0444); +static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */  static char __initdata nocb_buf[NR_CPUS * 5];  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ @@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)  }  __setup("rcu_nocbs=", rcu_nocb_setup); +static int __init parse_rcu_nocb_poll(char *arg) +{ +	rcu_nocb_poll = 1; +	return 0; +} +early_param("rcu_nocb_poll", parse_rcu_nocb_poll); +  /* Is the specified CPU a no-CPUs CPU? */  static bool is_nocb_cpu(int cpu)  { @@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)  	for (;;) {  		/* If not polling, wait for next batch of callbacks. */  		if (!rcu_nocb_poll) -			wait_event(rdp->nocb_wq, rdp->nocb_head); +			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);  		list = ACCESS_ONCE(rdp->nocb_head);  		if (!list) {  			schedule_timeout_interruptible(1); +			flush_signals(current);  			continue;  		} diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2cd3c1b4e58..7ae4c4c5420 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)  			cfs_rq->runnable_load_avg);  	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",  			cfs_rq->blocked_load_avg); -	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg", -			atomic64_read(&cfs_rq->tg->load_avg)); +	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg", +			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));  	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",  			cfs_rq->tg_load_contrib);  	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib", diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5eea8707234..81fa5364340 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)  	hrtimer_cancel(&cfs_b->slack_timer);  } -static void unthrottle_offline_cfs_rqs(struct rq *rq) +static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)  {  	struct cfs_rq *cfs_rq; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 418feb01344..4f02b284735 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)  static int do_balance_runtime(struct rt_rq *rt_rq)  {  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); -	struct root_domain *rd = cpu_rq(smp_processor_id())->rd; +	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;  	int i, weight, more = 0;  	u64 rt_period; diff --git a/lib/digsig.c b/lib/digsig.c index 8c0e62975c8..dc2be7ed176 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,  	memset(out1, 0, head);  	memcpy(out1 + head, p, l); +	kfree(p); +  	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);  	if (err)  		goto err; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6001ee6347a..b5783d81eda 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,  	if (flags & FOLL_WRITE && !pmd_write(*pmd))  		goto out; +	/* Avoid dumping huge zero page */ +	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) +		return ERR_PTR(-EFAULT); +  	page = pmd_page(*pmd);  	VM_BUG_ON(!PageHead(page));  	if (flags & FOLL_TOUCH) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4f3ea0b1e57..546db81820e 100644 --- 
a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,  		if (!huge_pte_none(huge_ptep_get(ptep))) {  			pte = huge_ptep_get_and_clear(mm, address, ptep);  			pte = pte_mkhuge(pte_modify(pte, newprot)); +			pte = arch_make_huge_pte(pte, vma, NULL, 0);  			set_huge_pte_at(mm, address, ptep, pte);  			pages++;  		} diff --git a/mm/migrate.c b/mm/migrate.c index c38778610aa..2fd8b4af474 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,  	if (is_write_migration_entry(entry))  		pte = pte_mkwrite(pte);  #ifdef CONFIG_HUGETLB_PAGE -	if (PageHuge(new)) +	if (PageHuge(new)) {  		pte = pte_mkhuge(pte); +		pte = arch_make_huge_pte(pte, vma, new, 0); +	}  #endif  	flush_cache_page(vma, addr, pte_pfn(pte));  	set_pte_at(mm, addr, ptep, pte); diff --git a/mm/mmap.c b/mm/mmap.c index 35730ee9d51..d1e4124f3d0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2943,7 +2943,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)   * vma in this mm is backed by the same anon_vma or address_space.   *   * We can take all the locks in random order because the VM code - * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never + * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never   * takes more than one of them in a row. Secondly we're protected   * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.   * diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 25bfce0666e..4925a02ae7e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)  	__u8 reason = hci_proto_disconn_ind(conn);  	switch (conn->type) { -	case ACL_LINK: -		hci_acl_disconn(conn, reason); -		break;  	case AMP_LINK:  		hci_amp_disconn(conn, reason);  		break; +	default: +		hci_acl_disconn(conn, reason); +		break;  	}  } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 68a9587c969..5abefb12891 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  	skb_pull(skb, sizeof(code)); +	/* +	 * The SMP context must be initialized for all other PDUs except +	 * pairing and security requests. If we get any other PDU when +	 * not initialized simply disconnect (done if this function +	 * returns an error). +	 */ +	if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && +	    !conn->smp_chan) { +		BT_ERR("Unexpected SMP command 0x%02x. 
Disconnecting.", code); +		kfree_skb(skb); +		return -ENOTSUPP; +	} +  	switch (code) {  	case SMP_CMD_PAIRING_REQ:  		reason = smp_cmd_pairing_req(conn, skb); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b29dacf900f..e6e1cbe863f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,  			return -EFAULT;  		i += len;  		mutex_lock(&pktgen_thread_lock); -		pktgen_add_device(t, f); +		ret = pktgen_add_device(t, f);  		mutex_unlock(&pktgen_thread_lock); -		ret = count; -		sprintf(pg_result, "OK: add_device=%s", f); +		if (!ret) { +			ret = count; +			sprintf(pg_result, "OK: add_device=%s", f); +		} else +			sprintf(pg_result, "ERROR: can not add device %s", f);  		goto out;  	} diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a9a2ae3e221..32443ebc3e8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)  	new->network_header	= old->network_header;  	new->mac_header		= old->mac_header;  	new->inner_transport_header = old->inner_transport_header; -	new->inner_network_header = old->inner_transport_header; +	new->inner_network_header = old->inner_network_header;  	skb_dst_copy(new, old);  	new->rxhash		= old->rxhash;  	new->ooo_okay		= old->ooo_okay; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 291f2ed7cc3..cdf2e707bb1 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)  {  	int cnt; /* increase in packets */  	unsigned int delta = 0; +	u32 snd_cwnd = tp->snd_cwnd; + +	if (unlikely(!snd_cwnd)) { +		pr_err_once("snd_cwnd is nul, please report this bug.\n"); +		snd_cwnd = 1U; +	}  	/* RFC3465: ABC Slow start  	 * Increase only after a full MSS of bytes is acked @@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)  	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)  		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */  	else -		cnt = tp->snd_cwnd;			/* exponential increase */ +		cnt = snd_cwnd;				/* exponential increase */  	/* RFC3465: ABC  	 * We MAY increase by 2 if discovered delayed ack @@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)  	tp->bytes_acked = 0;  	tp->snd_cwnd_cnt += cnt; -	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { -		tp->snd_cwnd_cnt -= tp->snd_cwnd; +	while (tp->snd_cwnd_cnt >= snd_cwnd) { +		tp->snd_cwnd_cnt -= snd_cwnd;  		delta++;  	} -	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); +	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);  }  EXPORT_SYMBOL_GPL(tcp_slow_start); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18f97ca76b0..ad70a962c20 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)  		}  	} else {  		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { +			if (!tcp_packets_in_flight(tp)) { +				tcp_enter_frto_loss(sk, 2, flag); +				return true; +			} +  			/* Prevent sending of new data. */  			tp->snd_cwnd = min(tp->snd_cwnd,  					   tcp_packets_in_flight(tp)); @@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,  	 * the remote receives only the retransmitted (regular) SYNs: either  	 * the original SYN-data or the corresponding SYN-ACK is lost.  	 
*/ -	syn_drop = (cookie->len <= 0 && data && -		    inet_csk(sk)->icsk_retransmits); +	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);  	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 70b09ef2463..eadb693eef5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)  		 * errors returned from accept().  		 */  		inet_csk_reqsk_queue_drop(sk, req, prev); +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  		goto out;  	case TCP_SYN_SENT: @@ -1500,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  	 * clogging syn queue with openreqs with exponentially increasing  	 * timeout.  	 */ -	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) +	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);  		goto drop; +	}  	req = inet_reqsk_alloc(&tcp_request_sock_ops);  	if (!req) @@ -1666,6 +1669,7 @@ drop_and_release:  drop_and_free:  	reqsk_free(req);  drop: +	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  	return 0;  }  EXPORT_SYMBOL(tcp_v4_conn_request); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 420e5632638..1b5d8cb9b12 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)  	if (dev->addr_len != IEEE802154_ADDR_LEN)  		return -1;  	memcpy(eui, dev->dev_addr, 8); +	eui[0] ^= 2;  	return 0;  } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8edf2601065..7a778b9a7b8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)  		if (skb->protocol == htons(ETH_P_IPV6)) {  			sin->sin6_addr = ipv6_hdr(skb)->saddr;  			if (np->rxopt.all) -				datagram_recv_ctl(sk, msg, skb); +				ip6_datagram_recv_ctl(sk, msg, skb);  			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)  				sin->sin6_scope_id = IP6CB(skb)->iif;  		} else { @@ -468,7 +468,8 @@ out:  } -int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) +int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, +			  struct sk_buff *skb)  {  	struct ipv6_pinfo *np = inet6_sk(sk);  	struct inet6_skb_parm *opt = IP6CB(skb); @@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)  	}  	return 0;  } +EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); -int datagram_send_ctl(struct net *net, struct sock *sk, -		      struct msghdr *msg, struct flowi6 *fl6, -		      struct ipv6_txoptions *opt, -		      int *hlimit, int *tclass, int *dontfrag) +int ip6_datagram_send_ctl(struct net *net, struct sock *sk, +			  struct msghdr *msg, struct flowi6 *fl6, +			  struct ipv6_txoptions *opt, +			  int *hlimit, int *tclass, int *dontfrag)  {  	struct in6_pktinfo *src_info;  	struct cmsghdr *cmsg; @@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,  exit_f:  	return err;  } -EXPORT_SYMBOL_GPL(datagram_send_ctl); +EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 29124b7a04c..d6de4b44725 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,  		msg.msg_control = (void*)(fl->opt+1);  		memset(&flowi6, 0, sizeof(flowi6)); -		err 
= datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, -					&junk, &junk); +		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, +					    &junk, &junk, &junk);  		if (err)  			goto done;  		err = -EINVAL; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c727e471275..131dd097736 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,  	int ret;  	if (!ip6_tnl_xmit_ctl(t)) -		return -1; +		goto tx_err;  	switch (skb->protocol) {  	case htons(ETH_P_IP): diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ee94d31c9d4..d1e2e8ef29c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -476,8 +476,8 @@ sticky_done:  		msg.msg_controllen = optlen;  		msg.msg_control = (void*)(opt+1); -		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, -					 &junk); +		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, +					     &junk, &junk);  		if (retv)  			goto done;  update: @@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,  		release_sock(sk);  		if (skb) { -			int err = datagram_recv_ctl(sk, &msg, skb); +			int err = ip6_datagram_recv_ctl(sk, &msg, skb);  			kfree_skb(skb);  			if (err)  				return err; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6cd29b1e8b9..70fa8144999 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,  	sock_recv_ts_and_drops(msg, sk, skb);  	if (np->rxopt.all) -		datagram_recv_ctl(sk, msg, skb); +		ip6_datagram_recv_ctl(sk, msg, skb);  	err = copied;  	if (flags & MSG_TRUNC) @@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(struct ipv6_txoptions); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e229a3bc345..363d8b7772e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -928,7 +928,7 @@ restart:  	dst_hold(&rt->dst);  	read_unlock_bh(&table->tb6_lock); -	if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) +	if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))  		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);  	else if (!(rt->dst.flags & DST_HOST))  		nrt = rt6_alloc_clone(rt, &fl6->daddr); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 93825dd3a7c..4f43537197e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,  		}  		inet_csk_reqsk_queue_drop(sk, req, prev); +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  		goto out;  	case TCP_SYN_SENT: @@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)  			goto drop;  	} -	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) +	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);  		goto drop; +	}  	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);  	if (req == NULL) @@ -1108,6 +1111,7 @@ drop_and_release:  drop_and_free:  	reqsk_free(req);  drop: +	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  	return 0; 
/* don't send reset */  } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index dfaa29b8b29..fb083295ff0 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -443,7 +443,7 @@ try_again:  			ip_cmsg_recv(msg, skb);  	} else {  		if (np->rxopt.all) -			datagram_recv_ctl(sk, msg, skb); +			ip6_datagram_recv_ctl(sk, msg, skb);  	}  	err = copied; @@ -1153,8 +1153,8 @@ do_udp_sendmsg:  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(*opt); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1a9f3723c13..2ac884d0e89 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)  } +/* Lookup the tunnel socket, possibly involving the fs code if the socket is + * owned by userspace.  A struct sock returned from this function must be + * released using l2tp_tunnel_sock_put once you're done with it. + */ +struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) +{ +	int err = 0; +	struct socket *sock = NULL; +	struct sock *sk = NULL; + +	if (!tunnel) +		goto out; + +	if (tunnel->fd >= 0) { +		/* Socket is owned by userspace, who might be in the process +		 * of closing it.  Look the socket up using the fd to ensure +		 * consistency. +		 */ +		sock = sockfd_lookup(tunnel->fd, &err); +		if (sock) +			sk = sock->sk; +	} else { +		/* Socket is owned by kernelspace */ +		sk = tunnel->sock; +	} + +out: +	return sk; +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); + +/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ +void l2tp_tunnel_sock_put(struct sock *sk) +{ +	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); +	if (tunnel) { +		if (tunnel->fd >= 0) { +			/* Socket is owned by userspace */ +			sockfd_put(sk->sk_socket); +		} +		sock_put(sk); +	} +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); +  /* Lookup a session by id in the global session list   */  static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) @@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len  	struct udphdr *uh;  	struct inet_sock *inet;  	__wsum csum; -	int old_headroom; -	int new_headroom;  	int headroom;  	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0;  	int udp_len; @@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len  	 */  	headroom = NET_SKB_PAD + sizeof(struct iphdr) +  		uhlen + hdr_len; -	old_headroom = skb_headroom(skb);  	if (skb_cow_head(skb, headroom)) {  		kfree_skb(skb);  		return NET_XMIT_DROP;  	} -	new_headroom = skb_headroom(skb);  	skb_orphan(skb); -	skb->truesize += new_headroom - old_headroom; -  	/* Setup L2TP header */  	session->build_header(session, __skb_push(skb, hdr_len)); @@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32  	tunnel->old_sk_destruct = sk->sk_destruct;  	sk->sk_destruct = &l2tp_tunnel_destruct;  	tunnel->sock = sk; +	tunnel->fd = fd;  	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");  	sk->sk_allocation = GFP_ATOMIC; @@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);   */  int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)  { -	int err = 0; -	struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; +	int err = -EBADF; +	struct socket *sock = NULL; +	struct sock *sk = NULL; + +	sk = l2tp_tunnel_sock_lookup(tunnel); +	if (!sk) +		goto out; + +	sock = sk->sk_socket; +	BUG_ON(!sock);  	/* Force the tunnel socket to close. This will eventually  	 * cause the tunnel to be deleted via the normal socket close  	 * mechanisms when userspace closes the tunnel socket.  	 */ -	if (sock != NULL) { -		err = inet_shutdown(sock, 2); +	err = inet_shutdown(sock, 2); -		/* If the tunnel's socket was created by the kernel, -		 * close the socket here since the socket was not -		 * created by userspace. -		 */ -		if (sock->file == NULL) -			err = inet_release(sock); -	} +	/* If the tunnel's socket was created by the kernel, +	 * close the socket here since the socket was not +	 * created by userspace. 
+	 */ +	if (sock->file == NULL) +		err = inet_release(sock); +	l2tp_tunnel_sock_put(sk); +out:  	return err;  }  EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 56d583e083a..e62204cad4f 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -188,7 +188,8 @@ struct l2tp_tunnel {  	int (*recv_payload_hook)(struct sk_buff *skb);  	void (*old_sk_destruct)(struct sock *);  	struct sock		*sock;		/* Parent socket */ -	int			fd; +	int			fd;		/* Parent fd, if tunnel socket +						 * was created by userspace */  	uint8_t			priv[0];	/* private data */  }; @@ -228,6 +229,8 @@ out:  	return tunnel;  } +extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); +extern void l2tp_tunnel_sock_put(struct sock *sk);  extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);  extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);  extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 927547171bc..8ee4a86ae99 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(struct ipv6_txoptions); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; @@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  			    struct msghdr *msg, size_t len, int noblock,  			    int flags, int *addr_len)  { -	struct inet_sock *inet = inet_sk(sk); +	struct ipv6_pinfo *np = inet6_sk(sk);  	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;  	size_t copied = 0;  	int err = -EOPNOTSUPP; @@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  			lsa->l2tp_scope_id = IP6CB(skb)->iif;  	} -	if (inet->cmsg_flags) -		ip_cmsg_recv(msg, skb); +	if (np->rxopt.all) +		ip6_datagram_recv_ctl(sk, msg, skb);  	if (flags & MSG_TRUNC)  		copied = skb->len; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 286366ef893..716605c241f 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	struct l2tp_session *session;  	struct l2tp_tunnel *tunnel;  	struct pppol2tp_session *ps; -	int old_headroom; -	int new_headroom;  	int uhlen, headroom;  	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) @@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	if (tunnel == NULL)  		goto abort_put_sess; -	old_headroom = skb_headroom(skb);  	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0;  	headroom = NET_SKB_PAD +  		   sizeof(struct iphdr) + /* IP header */ @@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	if (skb_cow_head(skb, headroom))  		goto abort_put_sess_tun; -	new_headroom = skb_headroom(skb); -	skb->truesize += new_headroom - old_headroom; -  	/* Setup PPP header */  	__skb_push(skb, sizeof(ppph));  	skb->data[0] = ppph[0]; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index a9327e2e48c..670cbc3518d 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -35,10 +35,11 @@  /* Must be called with rcu_read_lock. */  static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)  { -	if (unlikely(!vport)) { -		kfree_skb(skb); -		return; -	} +	if (unlikely(!vport)) +		goto error; + +	if (unlikely(skb_warn_if_lro(skb))) +		goto error;  	/* Make our own copy of the packet.  Otherwise we will mangle the  	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). @@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)  	skb_push(skb, ETH_HLEN);  	ovs_vport_receive(vport, skb); +	return; + +error: +	kfree_skb(skb);  }  /* Called with rcu_read_lock and bottom-halves disabled. */ @@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)  		goto error;  	} -	if (unlikely(skb_warn_if_lro(skb))) -		goto error; -  	skb->dev = netdev_vport->dev;  	len = skb->len;  	dev_queue_xmit(skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e639645e8fe..c111bd0e083 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)  	packet_flush_mclist(sk); -	memset(&req_u, 0, sizeof(req_u)); - -	if (po->rx_ring.pg_vec) +	if (po->rx_ring.pg_vec) { +		memset(&req_u, 0, sizeof(req_u));  		packet_set_ring(sk, &req_u, 1, 0); +	} -	if (po->tx_ring.pg_vec) +	if (po->tx_ring.pg_vec) { +		memset(&req_u, 0, sizeof(req_u));  		packet_set_ring(sk, &req_u, 1, 1); +	}  	fanout_release(sk); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 298c0ddfb57..3d2acc7a9c8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		if (q->rate) {  			struct sk_buff_head *list = &sch->q; -			delay += packet_len_2_sched_time(skb->len, q); -  			if (!skb_queue_empty(list)) {  				/* -				 * Last packet in queue is reference point (now). -				 * First packet in queue is already in flight, -				 * calculate this time bonus and substract +				 * Last packet in queue is reference point (now), +				 * calculate this time bonus and subtract  				 * from delay.  				 
*/ -				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; +				delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; +				delay = max_t(psched_tdiff_t, 0, delay);  				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;  			} + +			delay += packet_len_2_sched_time(skb->len, q);  		}  		cb->time_to_send = now + delay; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 159b9bc5d63..d8420ae614d 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)  		return;  	if (atomic_dec_and_test(&key->refcnt)) { -		kfree(key); +		kzfree(key);  		SCTP_DBG_OBJCNT_DEC(keys);  	}  } diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 17a001bac2c..1a9c5fb7731 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)  /* Final destructor for endpoint.  */  static void sctp_endpoint_destroy(struct sctp_endpoint *ep)  { +	int i; +  	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);  	/* Free up the HMAC transform. */ @@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)  	sctp_inq_free(&ep->base.inqueue);  	sctp_bind_addr_free(&ep->base.bind_addr); +	for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) +		memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); +  	/* Remove and free the port */  	if (sctp_sk(ep->base.sk)->bind_hash)  		sctp_put_port(ep->base.sk); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e65758cb03..cedd9bf67b8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,  	ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);  out: -	kfree(authkey); +	kzfree(authkey);  	return ret;  } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0a148c9d2a5..0f679df7d07 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,  }  /* - * See net/ipv6/datagram.c : datagram_recv_ctl + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl   */  static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,  				     struct cmsghdr *cmh) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 01592d7d478..45f1618c8e2 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,  						  &iwe, IW_EV_UINT_LEN);  	} -	buf = kmalloc(30, GFP_ATOMIC); +	buf = kmalloc(31, GFP_ATOMIC);  	if (buf) {  		memset(&iwe, 0, sizeof(iwe));  		iwe.cmd = IWEVCUSTOM; diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index bbbd276659b..7203e66dcd6 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile @@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o  # Try to match the kernel target.  
ifndef CONFIG_64BIT +ifndef CROSS_COMPILE  # s390 has -m31 flag to build 31 bit binaries  ifndef CONFIG_S390 @@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG)  HOSTLOADLIBES_bpf-fancy += $(MFLAG)  HOSTLOADLIBES_dropper += $(MFLAG)  endif +endif  # Tell kbuild to always build the programs  always := $(hostprogs-y) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 4d2c7dfdaab..2bb08a962ce 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -230,12 +230,12 @@ our $Inline	= qr{inline|__always_inline|noinline};  our $Member	= qr{->$Ident|\.$Ident|\[[^]]*\]};  our $Lval	= qr{$Ident(?:$Member)*}; -our $Float_hex	= qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; -our $Float_dec	= qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; -our $Float_int	= qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; +our $Float_hex	= qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; +our $Float_dec	= qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; +our $Float_int	= qr{(?i)[0-9]+e-?[0-9]+[fl]?};  our $Float	= qr{$Float_hex|$Float_dec|$Float_int}; -our $Constant	= qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; -our $Assignment	= qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; +our $Constant	= qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; +our $Assignment	= qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};  our $Compare    = qr{<=|>=|==|!=|<|>};  our $Operators	= qr{  			<=|>=|==|!=| diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index a210c8d7b4b..3b98159d964 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig @@ -108,13 +108,18 @@ if SND_IMX_SOC  config SND_SOC_IMX_SSI  	tristate -config SND_SOC_IMX_PCM_FIQ +config SND_SOC_IMX_PCM  	tristate + +config SND_SOC_IMX_PCM_FIQ +	bool  	select FIQ +	select SND_SOC_IMX_PCM  config SND_SOC_IMX_PCM_DMA -	tristate +	bool  	select SND_SOC_DMAENGINE_PCM +	select SND_SOC_IMX_PCM  config SND_SOC_IMX_AUDMUX  	tristate diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile index ec1457915d7..afd34794db5 100644 --- a/sound/soc/fsl/Makefile +++ b/sound/soc/fsl/Makefile @@ -41,10 +41,7 @@ endif  obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o  obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o -obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += snd-soc-imx-pcm-fiq.o -snd-soc-imx-pcm-fiq-y := imx-pcm-fiq.o imx-pcm.o -obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += snd-soc-imx-pcm-dma.o -snd-soc-imx-pcm-dma-y := imx-pcm-dma.o imx-pcm.o +obj-$(CONFIG_SND_SOC_IMX_PCM) += snd-soc-imx-pcm.o  # i.MX Machine Support  snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c index bf363d8d044..500f8ce55d7 100644 --- a/sound/soc/fsl/imx-pcm-dma.c +++ b/sound/soc/fsl/imx-pcm-dma.c @@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {  	.pcm_free	= imx_pcm_free,  }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_dma_init(struct platform_device *pdev)  {  	return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);  } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ -	snd_soc_unregister_platform(&pdev->dev); -	return 0; -} - -static struct platform_driver imx_pcm_driver = { -	.driver = { -			.name = "imx-pcm-audio", -			.owner = THIS_MODULE, -	}, -	.probe = imx_soc_platform_probe, -	.remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-pcm-audio"); diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c index 5ec362ae4d0..920f945cb2f 
100644 --- a/sound/soc/fsl/imx-pcm-fiq.c +++ b/sound/soc/fsl/imx-pcm-fiq.c @@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {  	.pcm_free	= imx_pcm_fiq_free,  }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_fiq_init(struct platform_device *pdev)  {  	struct imx_ssi *ssi = platform_get_drvdata(pdev);  	int ret; @@ -314,23 +314,3 @@ failed_register:  	return ret;  } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ -	snd_soc_unregister_platform(&pdev->dev); -	return 0; -} - -static struct platform_driver imx_pcm_driver = { -	.driver = { -			.name = "imx-fiq-pcm-audio", -			.owner = THIS_MODULE, -	}, - -	.probe = imx_soc_platform_probe, -	.remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); - -MODULE_LICENSE("GPL"); diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c index 0c9f188ddc6..0d0625bfcb6 100644 --- a/sound/soc/fsl/imx-pcm.c +++ b/sound/soc/fsl/imx-pcm.c @@ -31,6 +31,7 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,  			runtime->dma_bytes);  	return ret;  } +EXPORT_SYMBOL_GPL(snd_imx_pcm_mmap);  static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)  { @@ -79,6 +80,7 @@ int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)  out:  	return ret;  } +EXPORT_SYMBOL_GPL(imx_pcm_new);  void imx_pcm_free(struct snd_pcm *pcm)  { @@ -100,6 +102,39 @@ void imx_pcm_free(struct snd_pcm *pcm)  		buf->area = NULL;  	}  } +EXPORT_SYMBOL_GPL(imx_pcm_free); + +static int imx_pcm_probe(struct platform_device *pdev) +{ +	if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) +		return imx_pcm_fiq_init(pdev); + +	return imx_pcm_dma_init(pdev); +} + +static int imx_pcm_remove(struct platform_device *pdev) +{ +	snd_soc_unregister_platform(&pdev->dev); +	return 0; +} + +static struct platform_device_id imx_pcm_devtype[] = { +	{ .name = "imx-pcm-audio", }, +	{ .name = "imx-fiq-pcm-audio", }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); + +static struct platform_driver imx_pcm_driver = { +	.driver = { +			.name = "imx-pcm", +			.owner = THIS_MODULE, +	}, +	.id_table = imx_pcm_devtype, +	.probe = imx_pcm_probe, +	.remove = imx_pcm_remove, +}; +module_platform_driver(imx_pcm_driver);  MODULE_DESCRIPTION("Freescale i.MX PCM driver");  MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h index 83c0ed7d55c..5ae13a13a35 100644 --- a/sound/soc/fsl/imx-pcm.h +++ b/sound/soc/fsl/imx-pcm.h @@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,  int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);  void imx_pcm_free(struct snd_pcm *pcm); +#ifdef CONFIG_SND_SOC_IMX_PCM_DMA +int imx_pcm_dma_init(struct platform_device *pdev); +#else +static inline int imx_pcm_dma_init(struct platform_device *pdev) +{ +	return -ENODEV; +} +#endif + +#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ +int imx_pcm_fiq_init(struct platform_device *pdev); +#else +static inline int imx_pcm_fiq_init(struct platform_device *pdev) +{ +	return -ENODEV; +} +#endif +  #endif /* _IMX_PCM_H */ diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore new file mode 100644 index 00000000000..44f095fa260 --- /dev/null +++ b/tools/vm/.gitignore @@ -0,0 +1,2 @@ +slabinfo +page-types  |
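
Two of the changes above lend themselves to a short standalone illustration. The addrconf_ifid_eui64() hunk adds "eui[0] ^= 2" so the IPv6 interface identifier derived from an IEEE 802.15.4 EUI-64 has its universal/local bit inverted, per the modified EUI-64 format of RFC 4291. The sch_netem hunk reorders the rate-limiting arithmetic so the "time bonus" relative to the tail of the queue is subtracted and clamped at zero before this packet's own transmission time is added. The sketch below is illustrative userspace C only, not kernel code; the function names, the main() driver, and the plain 64-bit integers standing in for psched_tdiff_t are invented for the example.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Invert the universal/local bit (0x02 of the first octet) when
	 * turning an 8-byte EUI-64 into an IPv6 interface identifier,
	 * the same operation as the eui[0] ^= 2 line above. */
	static void eui64_to_ipv6_ifid(const uint8_t eui[8], uint8_t ifid[8])
	{
		memcpy(ifid, eui, 8);
		ifid[0] ^= 0x02;
	}

	/* Mirror the corrected netem_enqueue() ordering: credit the time
	 * until the tail packet leaves, never let the delay go negative,
	 * then add this packet's own serialization time. */
	static int64_t netem_adjusted_delay(int64_t delay, int64_t now,
					    int64_t tail_time_to_send,
					    int64_t pkt_xmit_time)
	{
		delay -= tail_time_to_send - now;
		if (delay < 0)
			delay = 0;
		return delay + pkt_xmit_time;
	}

	int main(void)
	{
		const uint8_t eui[8] = { 0x02, 0x11, 0x22, 0x33,
					 0x44, 0x55, 0x66, 0x77 };
		uint8_t ifid[8];

		eui64_to_ipv6_ifid(eui, ifid);
		printf("ifid[0] = %02x\n", ifid[0]);	/* 00: U/L bit flipped */
		printf("delay   = %lld\n",
		       (long long)netem_adjusted_delay(1000, 50, 400, 120));
		return 0;
	}

The clamp in netem_adjusted_delay() matches the max_t(psched_tdiff_t, 0, delay) added in the patch: without it, a tail packet already overdue for transmission could drive the computed delay negative and reorder packets under rate limiting.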