| field | value | date |
|---|---|---|
| author | Arnd Bergmann <arnd@arndb.de> | 2011-10-07 23:07:41 +0200 |
| committer | Arnd Bergmann <arnd@arndb.de> | 2011-10-07 23:07:41 +0200 |
| commit | 8efc59ad6764ade520012cb192cd484e5191cd9c (patch) | |
| tree | d2465d91ebb2652e35cd08f715fc202b3f9e042a | |
| parent | 6f6184a9d01880a3e21349544f73b3720ce5e152 (diff) | |
| parent | 492c4a0df11573e141a2decc6012b3bddd14a11e (diff) | |
| download | olio-linux-3.10-8efc59ad6764ade520012cb192cd484e5191cd9c.tar.xz, olio-linux-3.10-8efc59ad6764ade520012cb192cd484e5191cd9c.zip | |
Merge branch 'sirf/cleanup' into next/cleanup
286 files changed, 2618 insertions, 2053 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
new file mode 100644
index 00000000000..29a4f892e43
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-scsi_host
@@ -0,0 +1,13 @@
+What:		/sys/class/scsi_host/hostX/isci_id
+Date:		June 2011
+Contact:	Dave Jiang <dave.jiang@intel.com>
+Description:
+		This file contains the enumerated host ID for the Intel
+		SCU controller. The Intel(R) C600 Series Chipset SATA/SAS
+		Storage Control Unit embeds up to two 4-port controllers in
+		a single PCI device.  The controllers are enumerated in order
+		which usually means the lowest number scsi_host corresponds
+		with the first controller, but this association is not
+		guaranteed.  The 'isci_id' attribute unambiguously identifies
+		the controller index: '0' for the first controller,
+		'1' for the second.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 6f3c598971f..06eb6d957c8 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -380,7 +380,7 @@ will be charged as a new owner of it.
 
 5.2 stat file
 
-5.2.1 memory.stat file includes following statistics
+memory.stat file includes following statistics
 
 # per-memory cgroup local status
 cache		- # of bytes of page cache memory.
@@ -438,89 +438,6 @@ Note:
 	 file_mapped is accounted only when the memory cgroup is owner of page
 	 cache.)
 
-5.2.2 memory.vmscan_stat
-
-memory.vmscan_stat includes statistics information for memory scanning and
-freeing, reclaiming. The statistics shows memory scanning information since
-memory cgroup creation and can be reset to 0 by writing 0 as
-
- #echo 0 > ../memory.vmscan_stat
-
-This file contains following statistics.
-
-[param]_[file_or_anon]_pages_by_[reason]_[under_heararchy]
-[param]_elapsed_ns_by_[reason]_[under_hierarchy]
-
-For example,
-
-  scanned_file_pages_by_limit indicates the number of scanned
-  file pages at vmscan.
-
-Now, 3 parameters are supported
-
-  scanned - the number of pages scanned by vmscan
-  rotated - the number of pages activated at vmscan
-  freed   - the number of pages freed by vmscan
-
-If "rotated" is high against scanned/freed, the memcg seems busy.
-
-Now, 2 reason are supported
-
-  limit - the memory cgroup's limit
-  system - global memory pressure + softlimit
-           (global memory pressure not under softlimit is not handled now)
-
-When under_hierarchy is added in the tail, the number indicates the
-total memcg scan of its children and itself.
-
-elapsed_ns is a elapsed time in nanosecond. This may include sleep time
-and not indicates CPU usage. So, please take this as just showing
-latency.
-
-Here is an example.
-
-# cat /cgroup/memory/A/memory.vmscan_stat
-scanned_pages_by_limit 9471864
-scanned_anon_pages_by_limit 6640629
-scanned_file_pages_by_limit 2831235
-rotated_pages_by_limit 4243974
-rotated_anon_pages_by_limit 3971968
-rotated_file_pages_by_limit 272006
-freed_pages_by_limit 2318492
-freed_anon_pages_by_limit 962052
-freed_file_pages_by_limit 1356440
-elapsed_ns_by_limit 351386416101
-scanned_pages_by_system 0
-scanned_anon_pages_by_system 0
-scanned_file_pages_by_system 0
-rotated_pages_by_system 0
-rotated_anon_pages_by_system 0
-rotated_file_pages_by_system 0
-freed_pages_by_system 0
-freed_anon_pages_by_system 0
-freed_file_pages_by_system 0
-elapsed_ns_by_system 0
-scanned_pages_by_limit_under_hierarchy 9471864
-scanned_anon_pages_by_limit_under_hierarchy 6640629
-scanned_file_pages_by_limit_under_hierarchy 2831235
-rotated_pages_by_limit_under_hierarchy 4243974
-rotated_anon_pages_by_limit_under_hierarchy 3971968
-rotated_file_pages_by_limit_under_hierarchy 272006
-freed_pages_by_limit_under_hierarchy 2318492
-freed_anon_pages_by_limit_under_hierarchy 962052
-freed_file_pages_by_limit_under_hierarchy 1356440
-elapsed_ns_by_limit_under_hierarchy 351386416101
-scanned_pages_by_system_under_hierarchy 0
-scanned_anon_pages_by_system_under_hierarchy 0
-scanned_file_pages_by_system_under_hierarchy 0
-rotated_pages_by_system_under_hierarchy 0
-rotated_anon_pages_by_system_under_hierarchy 0
-rotated_file_pages_by_system_under_hierarchy 0
-freed_pages_by_system_under_hierarchy 0
-freed_anon_pages_by_system_under_hierarchy 0
-freed_file_pages_by_system_under_hierarchy 0
-elapsed_ns_by_system_under_hierarchy 0
-
 5.3 swappiness
 
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 614d0382e2c..854ed5ca7e3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2086,9 +2086,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Override pmtimer IOPort with a hex value.
 			e.g. pmtmr=0x508
 
-	pnp.debug	[PNP]
-			Enable PNP debug messages.  This depends on the
-			CONFIG_PNP_DEBUG_MESSAGES option.
+	pnp.debug=1	[PNP]
+			Enable PNP debug messages (depends on the
+			CONFIG_PNP_DEBUG_MESSAGES option).  Change at run-time
+			via /sys/module/pnp/parameters/debug.  We always show
+			current resource usage; turning this on also shows
+			possible settings and some assignment information.
 
 	pnpacpi=	[ACPI]
 			{ off }
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt
index 8006c227fda..25320bf19c8 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/dmfe.txt
@@ -1,3 +1,5 @@
+Note: This driver doesn't have a maintainer.
+
 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
 
 This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
Authors:  Sten Wang <sten_wang@davicom.com.tw >   : Original Author -Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer  Contributors: diff --git a/MAINTAINERS b/MAINTAINERS index 28f65c249b9..ae8820e173a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1278,7 +1278,6 @@ F:	drivers/input/misc/ati_remote2.c  ATLX ETHERNET DRIVERS  M:	Jay Cliburn <jcliburn@gmail.com>  M:	Chris Snook <chris.snook@gmail.com> -M:	Jie Yang <jie.yang@atheros.com>  L:	netdev@vger.kernel.org  W:	http://sourceforge.net/projects/atl1  W:	http://atl1.sourceforge.net @@ -1574,7 +1573,6 @@ F:	drivers/scsi/bfa/  BROCADE BNA 10 GIGABIT ETHERNET DRIVER  M:	Rasesh Mody <rmody@brocade.com> -M:	Debashis Dutt <ddutt@brocade.com>  L:	netdev@vger.kernel.org  S:	Supported  F:	drivers/net/bna/ @@ -1758,7 +1756,6 @@ F:	Documentation/zh_CN/  CISCO VIC ETHERNET NIC DRIVER  M:	Christian Benvenuti <benve@cisco.com> -M:	Vasanthy Kolluri <vkolluri@cisco.com>  M:	Roopa Prabhu <roprabhu@cisco.com>  M:	David Wang <dwang2@cisco.com>  S:	Supported @@ -3262,6 +3259,17 @@ F:	Documentation/input/multi-touch-protocol.txt  F:	drivers/input/input-mt.c  K:	\b(ABS|SYN)_MT_ +INTEL C600 SERIES SAS CONTROLLER DRIVER +M:	Intel SCU Linux support <intel-linux-scu@intel.com> +M:	Dan Williams <dan.j.williams@intel.com> +M:	Dave Jiang <dave.jiang@intel.com> +M:	Ed Nadolski <edmund.nadolski@intel.com> +L:	linux-scsi@vger.kernel.org +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git +S:	Maintained +F:	drivers/scsi/isci/ +F:	firmware/isci/ +  INTEL IDLE DRIVER  M:	Len Brown <lenb@kernel.org>  L:	linux-pm@lists.linux-foundation.org @@ -4404,7 +4412,8 @@ L:	netfilter@vger.kernel.org  L:	coreteam@netfilter.org  W:	http://www.netfilter.org/  W:	http://www.iptables.org/ -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git  S:	Supported  F:	include/linux/netfilter*  F:	include/linux/netfilter/ @@ -4774,7 +4783,7 @@ F:	drivers/net/wireless/orinoco/  OSD LIBRARY and FILESYSTEM  M:	Boaz Harrosh <bharrosh@panasas.com> -M:	Benny Halevy <bhalevy@panasas.com> +M:	Benny Halevy <bhalevy@tonian.com>  L:	osd-dev@open-osd.org  W:	http://open-osd.org  T:	git git://git.open-osd.org/open-osd.git @@ -7200,6 +7209,9 @@ W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices  S:	Supported  F:	Documentation/hwmon/wm83??  
F:	drivers/leds/leds-wm83*.c +F:	drivers/input/misc/wm831x-on.c +F:	drivers/input/touchscreen/wm831x-ts.c +F:	drivers/input/touchscreen/wm97*.c  F:	drivers/mfd/wm8*.c  F:	drivers/power/wm83*.c  F:	drivers/rtc/rtc-wm83*.c @@ -7209,6 +7221,7 @@ F:	drivers/watchdog/wm83*_wdt.c  F:	include/linux/mfd/wm831x/  F:	include/linux/mfd/wm8350/  F:	include/linux/mfd/wm8400* +F:	include/linux/wm97xx.h  F:	include/sound/wm????.h  F:	sound/soc/codecs/wm* @@ -1,7 +1,7 @@  VERSION = 3  PATCHLEVEL = 1  SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7  NAME = "Divemaster Edition"  # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 60cde53d266..8bb936226de 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE          def_bool y  config GENERIC_GPIO -	def_bool y +	bool  config ZONE_DMA  	bool diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts index 4c053340ce3..e5818668d09 100644 --- a/arch/arm/boot/dts/tegra-harmony.dts +++ b/arch/arm/boot/dts/tegra-harmony.dts @@ -57,14 +57,14 @@  	};  	sdhci@c8000200 { -		gpios = <&gpio 69 0>, /* cd, gpio PI5 */ -			<&gpio 57 0>, /* wp, gpio PH1 */ -			<&gpio 155 0>; /* power, gpio PT3 */ +		cd-gpios = <&gpio 69 0>; /* gpio PI5 */ +		wp-gpios = <&gpio 57 0>; /* gpio PH1 */ +		power-gpios = <&gpio 155 0>; /* gpio PT3 */  	};  	sdhci@c8000600 { -		gpios = <&gpio 58 0>, /* cd, gpio PH2 */ -			<&gpio 59 0>, /* wp, gpio PH3 */ -			<&gpio 70 0>; /* power, gpio PI6 */ +		cd-gpios = <&gpio 58 0>; /* gpio PH2 */ +		wp-gpios = <&gpio 59 0>; /* gpio PH3 */ +		power-gpios = <&gpio 70 0>; /* gpio PI6 */  	};  }; diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts index 1940cae0074..64cedca6fc7 100644 --- a/arch/arm/boot/dts/tegra-seaboard.dts +++ b/arch/arm/boot/dts/tegra-seaboard.dts @@ -21,8 +21,8 @@  	};  	sdhci@c8000400 { -		gpios = <&gpio 69 0>, /* cd, gpio PI5 */ -			<&gpio 57 0>, /* wp, gpio PH1 */ -			<&gpio 70 0>; /* power, gpio PI6 */ +		cd-gpios = <&gpio 69 0>; /* gpio PI5 */ +		wp-gpios = <&gpio 57 0>; /* gpio PH1 */ +		power-gpios = <&gpio 70 0>; /* gpio PI6 */  	};  }; diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 83dce859886..a9e0dae86a2 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -158,7 +158,7 @@ void __init dove_spi0_init(void)  void __init dove_spi1_init(void)  { -	orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk()); +	orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());  }  /***************************************************************************** diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c index 1561b036a9b..79d6cd0c8e7 100644 --- a/arch/arm/mach-exynos4/clock.c +++ b/arch/arm/mach-exynos4/clock.c @@ -1160,7 +1160,7 @@ void __init_or_cpufreq exynos4_setup_clocks(void)  	vpllsrc = clk_get_rate(&clk_vpllsrc.clk);  	vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0), -				__raw_readl(S5P_VPLL_CON1), pll_4650); +				__raw_readl(S5P_VPLL_CON1), pll_4650c);  	clk_fout_apll.ops = &exynos4_fout_apll_ops;  	clk_fout_mpll.rate = mpll; diff --git a/arch/arm/mach-exynos4/mct.c b/arch/arm/mach-exynos4/mct.c index 1ae059b7ad7..ddd86864fb8 100644 --- a/arch/arm/mach-exynos4/mct.c +++ b/arch/arm/mach-exynos4/mct.c @@ -132,12 +132,18 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)  	return ((cycle_t)hi << 32) | lo;  } +static void exynos4_frc_resume(struct clocksource *cs) +{ +	exynos4_mct_frc_start(0, 0); +} +  struct 
clocksource mct_frc = {  	.name		= "mct-frc",  	.rating		= 400,  	.read		= exynos4_frc_read,  	.mask		= CLOCKSOURCE_MASK(64),  	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, +	.resume		= exynos4_frc_resume,  };  static void __init exynos4_clocksource_init(void) @@ -389,9 +395,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)  }  /* Setup the local clock events for a CPU */ -void __cpuinit local_timer_setup(struct clock_event_device *evt) +int __cpuinit local_timer_setup(struct clock_event_device *evt)  {  	exynos4_mct_tick_init(evt); + +	return 0;  }  int local_timer_ack(void) diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index 7c2282c6ba8..df6ef1b2f98 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c @@ -106,6 +106,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)  	 */  	spin_lock(&boot_lock);  	spin_unlock(&boot_lock); + +	set_cpu_online(cpu, true);  }  int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) diff --git a/arch/arm/mach-exynos4/setup-keypad.c b/arch/arm/mach-exynos4/setup-keypad.c index 1ee0ebff111..7862bfb5933 100644 --- a/arch/arm/mach-exynos4/setup-keypad.c +++ b/arch/arm/mach-exynos4/setup-keypad.c @@ -19,15 +19,16 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)  	if (rows > 8) {  		/* Set all the necessary GPX2 pins: KP_ROW[0~7] */ -		s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3)); +		s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3), +					S3C_GPIO_PULL_UP);  		/* Set all the necessary GPX3 pins: KP_ROW[8~] */ -		s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8), -					 S3C_GPIO_SFN(3)); +		s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8), +					 S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);  	} else {  		/* Set all the necessary GPX2 pins: KP_ROW[x] */ -		s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows, -					 S3C_GPIO_SFN(3)); +		s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3), +					S3C_GPIO_PULL_UP);  	}  	/* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */ diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index fcf0ae95651..8cdc730dcb3 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -32,6 +32,7 @@  #include <linux/interrupt.h>  #include <linux/io.h>  #include <linux/mtd/physmap.h> +#include <video/vga.h>  #include <mach/hardware.h>  #include <mach/platform.h> @@ -154,6 +155,7 @@ static struct map_desc ap_io_desc[] __initdata = {  static void __init ap_map_io(void)  {  	iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc)); +	vga_base = PCI_MEMORY_VADDR;  }  #define INTEGRATOR_SC_VALID_INT	0x003fffff diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index dd56bfb351e..11b86e5b71c 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -27,7 +27,6 @@  #include <linux/spinlock.h>  #include <linux/init.h>  #include <linux/io.h> -#include <video/vga.h>  #include <mach/hardware.h>  #include <mach/platform.h> @@ -505,7 +504,6 @@ void __init pci_v3_preinit(void)  	pcibios_min_io = 0x6000;  	pcibios_min_mem = 0x00100000; -	vga_base = PCI_MEMORY_VADDR;  	/*  	 * Hook in our fault handler for PCI errors diff --git a/arch/arm/mach-prima2/clock.c b/arch/arm/mach-prima2/clock.c index 615a4e75cea..aebad7e565c 100644 --- a/arch/arm/mach-prima2/clock.c +++ b/arch/arm/mach-prima2/clock.c @@ -350,10 +350,10 @@ static struct clk_lookup 
onchip_clks[] = {  		.clk = &clk_mem,  	}, {  		.dev_id = "sys", -			.clk = &clk_sys, +		.clk = &clk_sys,  	}, {  		.dev_id = "io", -			.clk = &clk_io, +		.clk = &clk_io,  	},  }; diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c index f57124bdd14..0ddf44698ec 100644 --- a/arch/arm/mach-prima2/prima2.c +++ b/arch/arm/mach-prima2/prima2.c @@ -1,5 +1,5 @@  /* - * Defines machines for CSR SiRFprimaII  + * Defines machines for CSR SiRFprimaII   *   * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.   * diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c index ed7ec48d11d..26ab6fee561 100644 --- a/arch/arm/mach-prima2/timer.c +++ b/arch/arm/mach-prima2/timer.c @@ -133,14 +133,14 @@ static struct irqaction sirfsoc_timer_irq = {  /* Overwrite weak default sched_clock with more precise one */  unsigned long long notrace sched_clock(void)  { -	static int is_mapped = 0; +	static int is_mapped;  	/*  	 * sched_clock is called earlier than .init of sys_timer  	 * if we map timer memory in .init of sys_timer, system  	 * will panic due to illegal memory access  	 */ -	if(!is_mapped) { +	if (!is_mapped) {  		sirfsoc_of_timer_map();  		is_mapped = 1;  	} diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index c5190a50f0d..d831c97833b 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -262,45 +262,6 @@ static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = {  	.cols		= 8,  }; -static int smdk6410_backlight_init(struct device *dev) -{ -	int ret; - -	ret = gpio_request(S3C64XX_GPF(15), "Backlight"); -	if (ret) { -		printk(KERN_ERR "failed to request GPF for PWM-OUT1\n"); -		return ret; -	} - -	/* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */ -	s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2)); - -	return 0; -} - -static void smdk6410_backlight_exit(struct device *dev) -{ -	s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT); -	gpio_free(S3C64XX_GPF(15)); -} - -static struct platform_pwm_backlight_data smdk6410_backlight_data = { -	.pwm_id		= 1, -	.max_brightness	= 255, -	.dft_brightness	= 255, -	.pwm_period_ns	= 78770, -	.init		= smdk6410_backlight_init, -	.exit		= smdk6410_backlight_exit, -}; - -static struct platform_device smdk6410_backlight_device = { -	.name		= "pwm-backlight", -	.dev		= { -		.parent		= &s3c_device_timer[1].dev, -		.platform_data	= &smdk6410_backlight_data, -	}, -}; -  static struct map_desc smdk6410_iodesc[] = {};  static struct platform_device *smdk6410_devices[] __initdata = { diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c index 302c42670bd..3b4451979d1 100644 --- a/arch/arm/plat-samsung/clock.c +++ b/arch/arm/plat-samsung/clock.c @@ -64,6 +64,17 @@ static LIST_HEAD(clocks);   */  DEFINE_SPINLOCK(clocks_lock); +/* Global watchdog clock used by arch_wtd_reset() callback */ +struct clk *s3c2410_wdtclk; +static int __init s3c_wdt_reset_init(void) +{ +	s3c2410_wdtclk = clk_get(NULL, "watchdog"); +	if (IS_ERR(s3c2410_wdtclk)) +		printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__); +	return 0; +} +arch_initcall(s3c_wdt_reset_init); +  /* enable and disable calls for use with the clk struct */  static int clk_null_enable(struct clk *clk, int enable) diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h index 87d5b38a86f..73c66d4d10f 100644 --- a/arch/arm/plat-samsung/include/plat/clock.h +++ 
b/arch/arm/plat-samsung/include/plat/clock.h @@ -9,6 +9,9 @@   * published by the Free Software Foundation.  */ +#ifndef __ASM_PLAT_CLOCK_H +#define __ASM_PLAT_CLOCK_H __FILE__ +  #include <linux/spinlock.h>  #include <linux/clkdev.h> @@ -121,3 +124,8 @@ extern int s3c64xx_sclk_ctrl(struct clk *clk, int enable);  extern void s3c_pwmclk_init(void); +/* Global watchdog clock used by arch_wtd_reset() callback */ + +extern struct clk *s3c2410_wdtclk; + +#endif /* __ASM_PLAT_CLOCK_H */ diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h index 54b762acb5a..40dbb2b0ae2 100644 --- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h +++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h @@ -10,6 +10,7 @@   * published by the Free Software Foundation.  */ +#include <plat/clock.h>  #include <plat/regs-watchdog.h>  #include <mach/map.h> @@ -19,17 +20,12 @@  static inline void arch_wdt_reset(void)  { -	struct clk *wdtclk; -  	printk("arch_reset: attempting watchdog reset\n");  	__raw_writel(0, S3C2410_WTCON);	  /* disable watchdog, to be safe  */ -	wdtclk = clk_get(NULL, "watchdog"); -	if (!IS_ERR(wdtclk)) { -		clk_enable(wdtclk); -	} else -		printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__); +	if (s3c2410_wdtclk) +		clk_enable(s3c2410_wdtclk);  	/* put initial values into count and data */  	__raw_writel(0x80, S3C2410_WTCNT); diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index d31ecf346b4..21bebe63df6 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 @@ -10,6 +10,10 @@ config CMPXCHG_LOCAL  	bool  	default n +config CMPXCHG_DOUBLE +	bool +	default n +  source "arch/x86/Kconfig.cpu"  endmenu diff --git a/arch/um/Makefile b/arch/um/Makefile index fab8121d2b3..c0f712cc7c5 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH)  KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \  	$(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap	\  	-Din6addr_loopback=kernel_in6addr_loopback \ -	-Din6addr_any=kernel_in6addr_any +	-Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr  KBUILD_AFLAGS += $(ARCH_INCLUDE) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index d51c404239a..364c8a15c4c 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)   * is done under a spinlock.  Checking whether the device is in use is   * line->tty->count > 1, also under the spinlock.   * - * tty->count serves to decide whether the device should be enabled or - * disabled on the host.  If it's equal to 1, then we are doing the + * line->count serves to decide whether the device should be enabled or + * disabled on the host.  If it's equal to 0, then we are doing the   * first open or last close.  Otherwise, open and close just return.   
*/ @@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty)  		goto out_unlock;  	err = 0; -	if (tty->count > 1) +	if (line->count++)  		goto out_unlock; -	spin_unlock(&line->count_lock); - +	BUG_ON(tty->driver_data);  	tty->driver_data = line;  	line->tty = tty; +	spin_unlock(&line->count_lock);  	err = enable_chan(line); -	if (err) +	if (err) /* line_close() will be called by our caller */  		return err;  	INIT_DELAYED_WORK(&line->task, line_timer_cb); @@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty)  	chan_window_size(&line->chan_list, &tty->winsize.ws_row,  			 &tty->winsize.ws_col); -	return err; +	return 0;  out_unlock:  	spin_unlock(&line->count_lock); @@ -460,17 +460,16 @@ void line_close(struct tty_struct *tty, struct file * filp)  	flush_buffer(line);  	spin_lock(&line->count_lock); -	if (!line->valid) -		goto out_unlock; +	BUG_ON(!line->valid); -	if (tty->count > 1) +	if (--line->count)  		goto out_unlock; -	spin_unlock(&line->count_lock); -  	line->tty = NULL;  	tty->driver_data = NULL; +	spin_unlock(&line->count_lock); +  	if (line->sigio) {  		unregister_winch(tty);  		line->sigio = 0; @@ -498,7 +497,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio,  	spin_lock(&line->count_lock); -	if (line->tty != NULL) { +	if (line->count) {  		*error_out = "Device is already open";  		goto out;  	} @@ -722,41 +721,53 @@ struct winch {  	int pid;  	struct tty_struct *tty;  	unsigned long stack; +	struct work_struct work;  }; -static void free_winch(struct winch *winch, int free_irq_ok) +static void __free_winch(struct work_struct *work)  { -	if (free_irq_ok) -		free_irq(WINCH_IRQ, winch); - -	list_del(&winch->list); +	struct winch *winch = container_of(work, struct winch, work); +	free_irq(WINCH_IRQ, winch);  	if (winch->pid != -1)  		os_kill_process(winch->pid, 1); -	if (winch->fd != -1) -		os_close_file(winch->fd);  	if (winch->stack != 0)  		free_stack(winch->stack, 0);  	kfree(winch);  } +static void free_winch(struct winch *winch) +{ +	int fd = winch->fd; +	winch->fd = -1; +	if (fd != -1) +		os_close_file(fd); +	list_del(&winch->list); +	__free_winch(&winch->work); +} +  static irqreturn_t winch_interrupt(int irq, void *data)  {  	struct winch *winch = data;  	struct tty_struct *tty;  	struct line *line; +	int fd = winch->fd;  	int err;  	char c; -	if (winch->fd != -1) { -		err = generic_read(winch->fd, &c, NULL); +	if (fd != -1) { +		err = generic_read(fd, &c, NULL);  		if (err < 0) {  			if (err != -EAGAIN) { +				winch->fd = -1; +				list_del(&winch->list); +				os_close_file(fd);  				printk(KERN_ERR "winch_interrupt : "  				       "read failed, errno = %d\n", -err);  				printk(KERN_ERR "fd %d is losing SIGWINCH "  				       "support\n", winch->tty_fd); -				free_winch(winch, 0); +				INIT_WORK(&winch->work, __free_winch); +				schedule_work(&winch->work);  				return IRQ_HANDLED;  			}  			goto out; @@ -828,7 +839,7 @@ static void unregister_winch(struct tty_struct *tty)  	list_for_each_safe(ele, next, &winch_handlers) {  		winch = list_entry(ele, struct winch, list);  		if (winch->tty == tty) { -			free_winch(winch, 1); +			free_winch(winch);  			break;  		}  	} @@ -844,7 +855,7 @@ static void winch_cleanup(void)  	list_for_each_safe(ele, next, &winch_handlers) {  		winch = list_entry(ele, struct winch, list); -		free_winch(winch, 1); +		free_winch(winch);  	}  	spin_unlock(&winch_handler_lock); diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c index 8ac7146c237..2e1de572860 
100644 --- a/arch/um/drivers/xterm.c +++ b/arch/um/drivers/xterm.c @@ -123,6 +123,7 @@ static int xterm_open(int input, int output, int primary, void *d,  		err = -errno;  		printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n",  		       errno); +		close(fd);  		return err;  	}  	close(fd); diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index ae084ad1a3a..1a7d2757fe0 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -42,10 +42,6 @@ extern long subarch_ptrace(struct task_struct *child, long request,  	unsigned long addr, unsigned long data);  extern unsigned long getreg(struct task_struct *child, int regno);  extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int get_fpregs(struct user_i387_struct __user *buf, -		      struct task_struct *child); -extern int set_fpregs(struct user_i387_struct __user *buf, -		      struct task_struct *child);  extern int arch_copy_tls(struct task_struct *new);  extern void clear_flushed_tls(struct task_struct *task); diff --git a/arch/um/include/shared/line.h b/arch/um/include/shared/line.h index 72f4f25af24..63df3ca02ac 100644 --- a/arch/um/include/shared/line.h +++ b/arch/um/include/shared/line.h @@ -33,6 +33,7 @@ struct line_driver {  struct line {  	struct tty_struct *tty;  	spinlock_t count_lock; +	unsigned long count;  	int valid;  	char *init_str; diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h index b0b4589e0eb..f1e0aa56c52 100644 --- a/arch/um/include/shared/registers.h +++ b/arch/um/include/shared/registers.h @@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs);  extern int save_registers(int pid, struct uml_pt_regs *regs);  extern int restore_registers(int pid, struct uml_pt_regs *regs);  extern int init_registers(int pid); -extern void get_safe_registers(unsigned long *regs); +extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);  extern unsigned long get_thread_reg(int reg, jmp_buf *buf);  extern int get_fp_registers(int pid, unsigned long *regs);  extern int put_fp_registers(int pid, unsigned long *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index fab4371184f..21c1ae7c3d7 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,  		arch_copy_thread(¤t->thread.arch, &p->thread.arch);  	}  	else { -		get_safe_registers(p->thread.regs.regs.gp); +		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);  		p->thread.request.u.thread = current->thread.request.u.thread;  		handler = new_thread_handler;  	} diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 701b672c112..c9da32b0c70 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -50,23 +50,11 @@ long arch_ptrace(struct task_struct *child, long request,  	void __user *vp = p;  	switch (request) { -	/* read word at location addr. */ -	case PTRACE_PEEKTEXT: -	case PTRACE_PEEKDATA: -		ret = generic_ptrace_peekdata(child, addr, data); -		break; -  	/* read the word at location addr in the USER area. */  	case PTRACE_PEEKUSR:  		ret = peek_user(child, addr, data);  		break; -	/* write the word at location addr. 
*/ -	case PTRACE_POKETEXT: -	case PTRACE_POKEDATA: -		ret = generic_ptrace_pokedata(child, addr, data); -		break; -  	/* write the word at location addr in the USER area */  	case PTRACE_POKEUSR:  		ret = poke_user(child, addr, data); @@ -107,16 +95,6 @@ long arch_ptrace(struct task_struct *child, long request,  		break;  	}  #endif -#ifdef PTRACE_GETFPREGS -	case PTRACE_GETFPREGS: /* Get the child FPU state. */ -		ret = get_fpregs(vp, child); -		break; -#endif -#ifdef PTRACE_SETFPREGS -	case PTRACE_SETFPREGS: /* Set the child FPU state. */ -		ret = set_fpregs(vp, child); -		break; -#endif  	case PTRACE_GET_THREAD_AREA:  		ret = ptrace_get_thread_area(child, addr, vp);  		break; @@ -154,12 +132,6 @@ long arch_ptrace(struct task_struct *child, long request,  		break;  	}  #endif -#ifdef PTRACE_ARCH_PRCTL -	case PTRACE_ARCH_PRCTL: -		/* XXX Calls ptrace on the host - needs some SMP thinking */ -		ret = arch_prctl(child, data, (void __user *) addr); -		break; -#endif  	default:  		ret = ptrace_request(child, request, addr, data);  		if (ret == -EIO) diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index 830fe6a1518..b866b9e3bef 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c @@ -8,6 +8,8 @@  #include <string.h>  #include <sys/ptrace.h>  #include "sysdep/ptrace.h" +#include "sysdep/ptrace_user.h" +#include "registers.h"  int save_registers(int pid, struct uml_pt_regs *regs)  { @@ -32,6 +34,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs)  /* This is set once at boot time and not changed thereafter */  static unsigned long exec_regs[MAX_REG_NR]; +static unsigned long exec_fp_regs[FP_SIZE];  int init_registers(int pid)  { @@ -42,10 +45,14 @@ int init_registers(int pid)  		return -errno;  	arch_init_registers(pid); +	get_fp_registers(pid, exec_fp_regs);  	return 0;  } -void get_safe_registers(unsigned long *regs) +void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)  {  	memcpy(regs, exec_regs, sizeof(exec_regs)); + +	if (fp_regs) +		memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs));  } diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index d261f170d12..e771398be5f 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c @@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR];  static int __init init_syscall_regs(void)  { -	get_safe_registers(syscall_regs); +	get_safe_registers(syscall_regs, NULL);  	syscall_regs[REGS_IP_INDEX] = STUB_CODE +  		((unsigned long) &batch_syscall_stub -  		 (unsigned long) &__syscall_stub_start); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index d6e0a2234b8..dee0e8cf8ad 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -373,6 +373,9 @@ void userspace(struct uml_pt_regs *regs)  		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))  			fatal_sigsegv(); +		if (put_fp_registers(pid, regs->fp)) +			fatal_sigsegv(); +  		/* Now we set local_using_sysemu to be used for one loop */  		local_using_sysemu = get_using_sysemu(); @@ -399,6 +402,12 @@ void userspace(struct uml_pt_regs *regs)  			fatal_sigsegv();  		} +		if (get_fp_registers(pid, regs->fp)) { +			printk(UM_KERN_ERR "userspace -  get_fp_registers failed, " +			       "errno = %d\n", errno); +			fatal_sigsegv(); +		} +  		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */  		if (WIFSTOPPED(status)) { @@ -457,10 +466,11 @@ void userspace(struct uml_pt_regs *regs)  }  static unsigned long thread_regs[MAX_REG_NR]; 
+static unsigned long thread_fp_regs[FP_SIZE];  static int __init init_thread_regs(void)  { -	get_safe_registers(thread_regs); +	get_safe_registers(thread_regs, thread_fp_regs);  	/* Set parent's instruction pointer to start of clone-stub */  	thread_regs[REGS_IP_INDEX] = STUB_CODE +  				(unsigned long) stub_clone_handler - @@ -503,6 +513,13 @@ int copy_context_skas0(unsigned long new_stack, int pid)  		return err;  	} +	err = put_fp_registers(pid, thread_fp_regs); +	if (err < 0) { +		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers " +		       "failed, pid = %d, err = %d\n", pid, err); +		return err; +	} +  	/* set a well known return code for detection of child write failure */  	child_data->err = 12345678; diff --git a/arch/um/sys-i386/asm/ptrace.h b/arch/um/sys-i386/asm/ptrace.h index 0273e4d09af..5d2a5911253 100644 --- a/arch/um/sys-i386/asm/ptrace.h +++ b/arch/um/sys-i386/asm/ptrace.h @@ -42,11 +42,6 @@   */  struct user_desc; -extern int get_fpxregs(struct user_fxsr_struct __user *buf, -		       struct task_struct *child); -extern int set_fpxregs(struct user_fxsr_struct __user *buf, -		       struct task_struct *tsk); -  extern int ptrace_get_thread_area(struct task_struct *child, int idx,                                    struct user_desc __user *user_desc); diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index d23b2d3ea38..3375c271785 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c @@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data)  	return put_user(tmp, (unsigned long __user *) data);  } -int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  {  	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;  	struct user_i387_struct fpregs; @@ -161,7 +161,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  	return n;  } -int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  {  	int n, cpu = ((struct thread_info *) child->stack)->cpu;  	struct user_i387_struct fpregs; @@ -174,7 +174,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  				    (unsigned long *) &fpregs);  } -int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)  {  	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;  	struct user_fxsr_struct fpregs; @@ -190,7 +190,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)  	return n;  } -int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)  {  	int n, cpu = ((struct thread_info *) child->stack)->cpu;  	struct user_fxsr_struct fpregs; @@ -206,5 +206,23 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)  long subarch_ptrace(struct task_struct *child, long request,  		    unsigned long addr, unsigned long data)  { -	return -EIO; +	int ret = -EIO; +	void __user *datap = (void __user *) data; +	switch (request) { +	case PTRACE_GETFPREGS: /* Get the child FPU state. */ +		ret = get_fpregs(datap, child); +		break; +	case PTRACE_SETFPREGS: /* Set the child FPU state. 
*/ +		ret = set_fpregs(datap, child); +		break; +	case PTRACE_GETFPXREGS: /* Get the child FPU state. */ +		ret = get_fpxregs(datap, child); +		break; +	case PTRACE_SETFPXREGS: /* Set the child FPU state. */ +		ret = set_fpxregs(datap, child); +		break; +	default: +		ret = -EIO; +	} +	return ret;  } diff --git a/arch/um/sys-i386/shared/sysdep/ptrace.h b/arch/um/sys-i386/shared/sysdep/ptrace.h index d50e62e0707..c398a507611 100644 --- a/arch/um/sys-i386/shared/sysdep/ptrace.h +++ b/arch/um/sys-i386/shared/sysdep/ptrace.h @@ -53,6 +53,7 @@ extern int sysemu_supported;  struct uml_pt_regs {  	unsigned long gp[MAX_REG_NR]; +	unsigned long fp[HOST_FPX_SIZE];  	struct faultinfo faultinfo;  	long syscall;  	int is_user; diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c index f43613643cd..4005506834f 100644 --- a/arch/um/sys-x86_64/ptrace.c +++ b/arch/um/sys-x86_64/ptrace.c @@ -145,7 +145,7 @@ int is_syscall(unsigned long addr)  	return instr == 0x050f;  } -int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  {  	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;  	long fpregs[HOST_FP_SIZE]; @@ -162,7 +162,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  	return n;  } -int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)  {  	int n, cpu = ((struct thread_info *) child->stack)->cpu;  	long fpregs[HOST_FP_SIZE]; @@ -182,12 +182,16 @@ long subarch_ptrace(struct task_struct *child, long request,  	void __user *datap = (void __user *) data;  	switch (request) { -	case PTRACE_GETFPXREGS: /* Get the child FPU state. */ +	case PTRACE_GETFPREGS: /* Get the child FPU state. */  		ret = get_fpregs(datap, child);  		break; -	case PTRACE_SETFPXREGS: /* Set the child FPU state. */ +	case PTRACE_SETFPREGS: /* Set the child FPU state. */  		ret = set_fpregs(datap, child);  		break; +	case PTRACE_ARCH_PRCTL: +		/* XXX Calls ptrace on the host - needs some SMP thinking */ +		ret = arch_prctl(child, data, (void __user *) addr); +		break;  	}  	return ret; diff --git a/arch/um/sys-x86_64/shared/sysdep/ptrace.h b/arch/um/sys-x86_64/shared/sysdep/ptrace.h index fdba5457947..8ee8f8e12af 100644 --- a/arch/um/sys-x86_64/shared/sysdep/ptrace.h +++ b/arch/um/sys-x86_64/shared/sysdep/ptrace.h @@ -85,6 +85,7 @@  struct uml_pt_regs {  	unsigned long gp[MAX_REG_NR]; +	unsigned long fp[HOST_FP_SIZE];  	struct faultinfo faultinfo;  	long syscall;  	int is_user; diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 4554cc6fb96..091508b533b 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -16,7 +16,6 @@  #endif  .macro altinstruction_entry orig alt feature orig_len alt_len -	.align 8  	.long \orig - .  	.long \alt - .  	
.word \feature diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 23fb6d79f20..37ad100a221 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -48,9 +48,6 @@ struct alt_instr {  	u16 cpuid;		/* cpuid bit set for replacement */  	u8  instrlen;		/* length of original instruction */  	u8  replacementlen;	/* length of new instruction, <= instrlen */ -#ifdef CONFIG_X86_64 -	u32 pad2; -#endif  };  extern void alternative_instructions(void); @@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end)  									\        "661:\n\t" oldinstr "\n662:\n"					\        ".section .altinstructions,\"a\"\n"				\ -      _ASM_ALIGN "\n"							\        "	 .long 661b - .\n"			/* label           */	\        "	 .long 663f - .\n"			/* new instruction */	\        "	 .word " __stringify(feature) "\n"	/* feature bit     */	\ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 4258aac99a6..88b23a43f34 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)  		asm goto("1: jmp %l[t_no]\n"  			 "2:\n"  			 ".section .altinstructions,\"a\"\n" -			 _ASM_ALIGN "\n"  			 " .long 1b - .\n"  			 " .long 0\n"		/* no replacement */  			 " .word %P0\n"		/* feature bit */ @@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)  		asm volatile("1: movb $0,%0\n"  			     "2:\n"  			     ".section .altinstructions,\"a\"\n" -			     _ASM_ALIGN "\n"  			     " .long 1b - .\n"  			     " .long 3f - .\n"  			     " .word %P1\n"		/* feature bit */ diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 20a61427506..3dd53f997b1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1721,10 +1721,8 @@ void __init xen_setup_machphys_mapping(void)  		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;  	}  #ifdef CONFIG_X86_32 -	if ((machine_to_phys_mapping + machine_to_phys_nr) -	    < machine_to_phys_mapping) -		machine_to_phys_nr = (unsigned long *)NULL -				     - machine_to_phys_mapping; +	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) +		< machine_to_phys_mapping);  #endif  } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c3b8d440873..46d6d21dbdb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -306,10 +306,12 @@ char * __init xen_memory_setup(void)  	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);  	extra_limit = xen_get_max_pages(); -	if (extra_limit >= max_pfn) -		extra_pages = extra_limit - max_pfn; -	else -		extra_pages = 0; +	if (max_pfn + extra_pages > extra_limit) { +		if (extra_limit > max_pfn) +			extra_pages = extra_limit - max_pfn; +		else +			extra_pages = 0; +	}  	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d4fc6d454f8..041d4fe9dfe 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -532,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)  	WARN_ON(xen_smp_intr_init(0));  	xen_init_lock_cpu(0); -	xen_init_spinlocks();  }  static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5158c505bef..163b4679556 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void)          struct pvclock_vcpu_time_info *src;  	cycle_t ret; -	src = 
&get_cpu_var(xen_vcpu)->time; +	preempt_disable_notrace(); +	src = &__get_cpu_var(xen_vcpu)->time;  	ret = pvclock_clocksource_read(src); -	put_cpu_var(xen_vcpu); +	preempt_enable_notrace();  	return ret;  } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index bcaf16ee6ad..b596e54ddd7 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,  {  	char *s[4], *p, *major_s = NULL, *minor_s = NULL;  	int ret; -	unsigned long major, minor, temp; +	unsigned long major, minor;  	int i = 0;  	dev_t dev; -	u64 bps, iops; +	u64 temp;  	memset(s, 0, sizeof(s)); @@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,  	dev = MKDEV(major, minor); -	ret = blkio_check_dev_num(dev); +	ret = strict_strtoull(s[1], 10, &temp);  	if (ret) -		return ret; +		return -EINVAL; -	newpn->dev = dev; +	/* For rule removal, do not check for device presence. */ +	if (temp) { +		ret = blkio_check_dev_num(dev); +		if (ret) +			return ret; +	} -	if (s[1] == NULL) -		return -EINVAL; +	newpn->dev = dev;  	switch (plid) {  	case BLKIO_POLICY_PROP: -		ret = strict_strtoul(s[1], 10, &temp); -		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) || -			temp > BLKIO_WEIGHT_MAX) +		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) || +		     temp > BLKIO_WEIGHT_MAX)  			return -EINVAL;  		newpn->plid = plid; @@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,  		switch(fileid) {  		case BLKIO_THROTL_read_bps_device:  		case BLKIO_THROTL_write_bps_device: -			ret = strict_strtoull(s[1], 10, &bps); -			if (ret) -				return -EINVAL; -  			newpn->plid = plid;  			newpn->fileid = fileid; -			newpn->val.bps = bps; +			newpn->val.bps = temp;  			break;  		case BLKIO_THROTL_read_iops_device:  		case BLKIO_THROTL_write_iops_device: -			ret = strict_strtoull(s[1], 10, &iops); -			if (ret) -				return -EINVAL; - -			if (iops > THROTL_IOPS_MAX) +			if (temp > THROTL_IOPS_MAX)  				return -EINVAL;  			newpn->plid = plid;  			newpn->fileid = fileid; -			newpn->val.iops = (unsigned int)iops; +			newpn->val.iops = (unsigned int)temp;  			break;  		}  		break; diff --git a/block/blk-core.c b/block/blk-core.c index 90e1ffdeb41..b2ed78afd9f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1167,7 +1167,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,   * true if merge was successful, otherwise false.   */  static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, -			       struct bio *bio) +			       struct bio *bio, unsigned int *request_count)  {  	struct blk_plug *plug;  	struct request *rq; @@ -1176,10 +1176,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,  	plug = tsk->plug;  	if (!plug)  		goto out; +	*request_count = 0;  	list_for_each_entry_reverse(rq, &plug->list, queuelist) {  		int el_ret; +		(*request_count)++; +  		if (rq->q != q)  			continue; @@ -1219,6 +1222,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)  	struct blk_plug *plug;  	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;  	struct request *req; +	unsigned int request_count = 0;  	/*  	 * low level driver can indicate that it wants pages above a @@ -1237,7 +1241,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)  	 * Check if we can merge with the plugged list before grabbing  	 * any locks.  	 
*/ -	if (attempt_plug_merge(current, q, bio)) +	if (attempt_plug_merge(current, q, bio, &request_count))  		goto out;  	spin_lock_irq(q->queue_lock); @@ -1302,11 +1306,10 @@ get_rq:  			if (__rq->q != q)  				plug->should_sort = 1;  		} +		if (request_count >= BLK_MAX_REQUEST_COUNT) +			blk_flush_plug_list(plug, false);  		list_add_tail(&req->queuelist, &plug->list); -		plug->count++;  		drive_stat_acct(req, 1); -		if (plug->count >= BLK_MAX_REQUEST_COUNT) -			blk_flush_plug_list(plug, false);  	} else {  		spin_lock_irq(q->queue_lock);  		add_acct_request(q, req, where); @@ -2634,7 +2637,6 @@ void blk_start_plug(struct blk_plug *plug)  	INIT_LIST_HEAD(&plug->list);  	INIT_LIST_HEAD(&plug->cb_list);  	plug->should_sort = 0; -	plug->count = 0;  	/*  	 * If this is a nested plug, don't actually assign it. It will be @@ -2718,7 +2720,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)  		return;  	list_splice_init(&plug->list, &list); -	plug->count = 0;  	if (plug->should_sort) {  		list_sort(NULL, &list, plug_rq_cmp); diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 58340d0cb23..1366a89d8e6 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)  	/*  	 * Select completion CPU  	 */ -	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) { +	if (req->cpu != -1) {  		ccpu = req->cpu;  		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {  			ccpu = blk_cpu_to_group(ccpu); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 0ee17b5e7fb..e681805cdb4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)  	ret = queue_var_store(&val, page, count);  	spin_lock_irq(q->queue_lock); -	if (val) { +	if (val == 2) {  		queue_flag_set(QUEUE_FLAG_SAME_COMP, q); -		if (val == 2) -			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); -	} else { +		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); +	} else if (val == 1) { +		queue_flag_set(QUEUE_FLAG_SAME_COMP, q); +		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); +	} else if (val == 0) {  		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);  		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);  	} diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a33bd4377c6..16ace89613b 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -130,8 +130,8 @@ struct cfq_queue {  	unsigned long slice_end;  	long slice_resid; -	/* pending metadata requests */ -	int meta_pending; +	/* pending priority requests */ +	int prio_pending;  	/* number of requests that are on the dispatch list or inside driver */  	int dispatched; @@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,  	if (rq_is_sync(rq1) != rq_is_sync(rq2))  		return rq_is_sync(rq1) ? rq1 : rq2; -	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META) -		return rq1->cmd_flags & REQ_META ? rq1 : rq2; +	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO) +		return rq1->cmd_flags & REQ_PRIO ? 
rq1 : rq2;  	s1 = blk_rq_pos(rq1);  	s2 = blk_rq_pos(rq2); @@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)  	cfqq->cfqd->rq_queued--;  	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,  					rq_data_dir(rq), rq_is_sync(rq)); -	if (rq->cmd_flags & REQ_META) { -		WARN_ON(!cfqq->meta_pending); -		cfqq->meta_pending--; +	if (rq->cmd_flags & REQ_PRIO) { +		WARN_ON(!cfqq->prio_pending); +		cfqq->prio_pending--;  	}  } @@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,  	 * So both queues are sync. Let the new request get disk time if  	 * it's a metadata request and the current queue is doing regular IO.  	 */ -	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) +	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)  		return true;  	/* @@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,  	struct cfq_io_context *cic = RQ_CIC(rq);  	cfqd->rq_queued++; -	if (rq->cmd_flags & REQ_META) -		cfqq->meta_pending++; +	if (rq->cmd_flags & REQ_PRIO) +		cfqq->prio_pending++;  	cfq_update_io_thinktime(cfqd, cfqq, cic);  	cfq_update_io_seektime(cfqd, cfqq, rq); diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index bc533dde16c..f895a244ca7 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -121,7 +121,7 @@  /* Maximum sleep allowed via Sleep() operator */ -#define ACPI_MAX_SLEEP                  20000	/* Two seconds */ +#define ACPI_MAX_SLEEP                  2000	/* Two seconds */  /******************************************************************************   * diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index c34aa51af4e..e3f47872ec2 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -13,6 +13,7 @@ config ACPI_APEI_GHES  	bool "APEI Generic Hardware Error Source"  	depends on ACPI_APEI && X86  	select ACPI_HED +	select IRQ_WORK  	select LLIST  	select GENERIC_ALLOCATOR  	help diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 8041248fce9..61540360d5c 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -618,7 +618,7 @@ int apei_osc_setup(void)  	};  	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; -	capbuf[OSC_SUPPORT_TYPE] = 0; +	capbuf[OSC_SUPPORT_TYPE] = 1;  	capbuf[OSC_CONTROL_TYPE] = 0;  	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 98de8f41867..9955a53733b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4250,7 +4250,7 @@ static int __init floppy_init(void)  	use_virtual_dma = can_use_virtual_dma & 1;  	fdc_state[0].address = FDC1;  	if (fdc_state[0].address == -1) { -		del_timer(&fd_timeout); +		del_timer_sync(&fd_timeout);  		err = -ENODEV;  		goto out_unreg_region;  	} @@ -4261,7 +4261,7 @@ static int __init floppy_init(void)  	fdc = 0;		/* reset fdc in case of unexpected interrupt */  	err = floppy_grab_irq_and_dma();  	if (err) { -		del_timer(&fd_timeout); +		del_timer_sync(&fd_timeout);  		err = -EBUSY;  		goto out_unreg_region;  	} @@ -4318,7 +4318,7 @@ static int __init floppy_init(void)  		user_reset_fdc(-1, FD_RESET_ALWAYS, false);  	}  	fdc = 0; -	del_timer(&fd_timeout); +	del_timer_sync(&fd_timeout);  	current_drive = 0;  	initialized = true;  	if (have_no_fdc) { @@ -4368,7 +4368,7 @@ out_unreg_blkdev:  	unregister_blkdev(FLOPPY_MAJOR, "fd");  out_put_disk:  	while (dr--) { -		del_timer(&motor_off_timer[dr]); +		
del_timer_sync(&motor_off_timer[dr]);  		if (disks[dr]->queue)  			blk_cleanup_queue(disks[dr]->queue);  		put_disk(disks[dr]); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 9e40b283a46..00c57c90e2d 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -46,7 +46,7 @@  #define DRV_PFX "xen-blkback:"  #define DPRINTK(fmt, args...)				\ -	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",	\ +	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\  		 __func__, __LINE__, ##args) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 3f129b45451..5fd2010f7d2 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,  		/*  		 * Enforce precondition before potential leak point. -		 * blkif_disconnect() is idempotent. +		 * xen_blkif_disconnect() is idempotent.  		 */  		xen_blkif_disconnect(be->blkif); @@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,  		break;  	case XenbusStateClosing: -		xen_blkif_disconnect(be->blkif);  		xenbus_switch_state(dev, XenbusStateClosing);  		break;  	case XenbusStateClosed: +		xen_blkif_disconnect(be->blkif);  		xenbus_switch_state(dev, XenbusStateClosed);  		if (xenbus_dev_is_online(dev))  			break;  		/* fall through if not online */  	case XenbusStateUnknown: -		/* implies blkif_disconnect() via blkback_remove() */ +		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */  		device_unregister(&dev->dev);  		break; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3ef476070ba..9cbac6b445e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {  	/* Apple MacBookAir3,1, MacBookAir3,2 */  	{ USB_DEVICE(0x05ac, 0x821b) }, +	/* Apple MacBookAir4,1 */ +	{ USB_DEVICE(0x05ac, 0x821f) }, +  	/* Apple MacBookPro8,2 */  	{ USB_DEVICE(0x05ac, 0x821a) }, +	/* Apple MacMini5,1 */ +	{ USB_DEVICE(0x05ac, 0x8281) }, +  	/* AVM BlueFRITZ! 
USB v2.0 */  	{ USB_DEVICE(0x057c, 0x3800) }, diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 65d27aff553..04d353f58d7 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)  /* protocol structure registered with shared transport */  static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {  	{ +		.chnl_id = HCI_EVENT_PKT, /* HCI Events */ +		.hdr_len = sizeof(struct hci_event_hdr), +		.offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), +		.len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ +		.reserve = 8, +	}, +	{  		.chnl_id = HCI_ACLDATA_PKT, /* ACL */  		.hdr_len = sizeof(struct hci_acl_hdr),  		.offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), @@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {  		.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */  		.reserve = 8,  	}, -	{ -		.chnl_id = HCI_EVENT_PKT, /* HCI Events */ -		.hdr_len = sizeof(struct hci_event_hdr), -		.offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), -		.len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ -		.reserve = 8, -	},  };  /* Called from HCI core to initialize the device */ @@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)  	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))  		return 0; -	for (i = 0; i < MAX_BT_CHNL_IDS; i++) { +	for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {  		err = st_unregister(&ti_st_proto[i]);  		if (err)  			BT_ERR("st_unregister(%d) failed with error %d", diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 7b0603eb012..cdc02ac8f41 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)  	pr = per_cpu(processors, cpu);  	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); +	if (!pr) +		return -ENODEV; +  	status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);  	if (ACPI_FAILURE(status))  		return -ENODEV; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 57cd3a406ed..fd7170a9ad2 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -290,6 +290,9 @@ static const struct {  	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,  		QUIRK_CYCLE_TIMER}, +	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, +		QUIRK_NO_MSI}, +  	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,  		QUIRK_CYCLE_TIMER}, diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index 231714def4d..4e24436b0f8 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,  	return 0;  } -int __devexit bgpio_remove(struct bgpio_chip *bgc) +int bgpio_remove(struct bgpio_chip *bgc)  {  	int err = gpiochip_remove(&bgc->gc); @@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)  }  EXPORT_SYMBOL_GPL(bgpio_remove); -int __devinit bgpio_init(struct bgpio_chip *bgc, -			 struct device *dev, -			 unsigned long sz, -			 void __iomem *dat, -			 void __iomem *set, -			 void __iomem *clr, -			 void __iomem *dirout, -			 void __iomem *dirin, -			 bool big_endian) +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, +	       unsigned long sz, void __iomem *dat, void __iomem *set, +	       void __iomem *clr, void __iomem *dirout, void __iomem *dirin, +	       bool big_endian)  {  	int ret; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c 
index dc0a5b56c81..e8a746712b5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);  	WREG32(CP_RB_RPTR_WR, 0); -	WREG32(CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB_RPTR_ADDR, @@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)  	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));  	rdev->cp.rptr = RREG32(CP_RB_RPTR); -	rdev->cp.wptr = RREG32(CP_RB_WPTR);  	evergreen_cp_start(rdev);  	rdev->cp.ready = true; @@ -3171,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)  }  int evergreen_copy_blit(struct radeon_device *rdev, -			uint64_t src_offset, uint64_t dst_offset, -			unsigned num_pages, struct radeon_fence *fence) +			uint64_t src_offset, +			uint64_t dst_offset, +			unsigned num_gpu_pages, +			struct radeon_fence *fence)  {  	int r;  	mutex_lock(&rdev->r600_blit.mutex);  	rdev->r600_blit.vb_ib = NULL; -	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); +	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	if (r) {  		if (rdev->r600_blit.vb_ib)  			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);  		mutex_unlock(&rdev->r600_blit.mutex);  		return r;  	} -	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); +	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	evergreen_blit_done_copy(rdev, fence);  	mutex_unlock(&rdev->r600_blit.mutex);  	return 0; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index cbf57d75d92..99fbd793c08 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1187,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB0_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB0_WPTR, rdev->cp.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1207,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);  	rdev->cp.rptr = RREG32(CP_RB0_RPTR); -	rdev->cp.wptr = RREG32(CP_RB0_WPTR);  	/* ring1  - compute only */  	/* Set ring buffer size */ @@ -1220,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB1_WPTR, 0); +	rdev->cp1.wptr = 0; +	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);  	/* set the wb address wether it's enabled or not */  	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1232,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);  	rdev->cp1.rptr = RREG32(CP_RB1_RPTR); -	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);  	/* ring2 - compute only */  	/* Set ring buffer size */ @@ -1245,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); -	WREG32(CP_RB2_WPTR, 0); +	rdev->cp2.wptr = 0; +	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);  	/* set the wb address wether it's enabled or not 
*/  	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1257,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)  	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);  	rdev->cp2.rptr = RREG32(CP_RB2_RPTR); -	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);  	/* start the rings */  	cayman_cp_start(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f2204cb1ccd..5b1837b4aac 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,  int r100_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset,  		   uint64_t dst_offset, -		   unsigned num_pages, +		   unsigned num_gpu_pages,  		   struct radeon_fence *fence)  {  	uint32_t cur_pages; -	uint32_t stride_bytes = PAGE_SIZE; +	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;  	uint32_t pitch;  	uint32_t stride_pixels;  	unsigned ndw; @@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,  	/* radeon pitch is /64 */  	pitch = stride_bytes / 64;  	stride_pixels = stride_bytes / 4; -	num_loops = DIV_ROUND_UP(num_pages, 8191); +	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);  	/* Ask for enough room for blit + flush + fence */  	ndw = 64 + (10 * num_loops); @@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,  		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);  		return -EINVAL;  	} -	while (num_pages > 0) { -		cur_pages = num_pages; +	while (num_gpu_pages > 0) { +		cur_pages = num_gpu_pages;  		if (cur_pages > 8191) {  			cur_pages = 8191;  		} -		num_pages -= cur_pages; +		num_gpu_pages -= cur_pages;  		/* pages are in Y direction - height  		   page width in X direction - width */ @@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,  		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));  		radeon_ring_write(rdev, 0);  		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); -		radeon_ring_write(rdev, num_pages); -		radeon_ring_write(rdev, num_pages); +		radeon_ring_write(rdev, cur_pages); +		radeon_ring_write(rdev, cur_pages);  		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));  	}  	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); @@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)  	/* Force read & write ptr to 0 */  	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);  	WREG32(RADEON_CP_RB_RPTR_WR, 0); -	WREG32(RADEON_CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address whether it's enabled or not */  	WREG32(R_00070C_CP_RB_RPTR_ADDR, @@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)  	WREG32(RADEON_CP_RB_CNTL, tmp);  	udelay(10);  	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); -	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); -	/* protect against crazy HW on resume */ -	rdev->cp.wptr &= rdev->cp.ptr_mask;  	/* Set cp mode to bus mastering & enable cp*/  	WREG32(RADEON_CP_CSQ_MODE,  	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) | diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f2405830041..a1f3ba063c2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)  int r200_copy_dma(struct radeon_device *rdev,  		  uint64_t src_offset,  		  uint64_t dst_offset, -		  unsigned num_pages, +		  unsigned num_gpu_pages,  		  struct 
radeon_fence *fence)  {  	uint32_t size; @@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,  	int r = 0;  	/* radeon pitch is /64 */ -	size = num_pages << PAGE_SHIFT; +	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;  	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);  	r = radeon_ring_lock(rdev, num_loops * 4 + 64);  	if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aa5571b73aa..720dd99163f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)  	/* Initialize the ring buffer's read and write pointers */  	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);  	WREG32(CP_RB_RPTR_WR, 0); -	WREG32(CP_RB_WPTR, 0); +	rdev->cp.wptr = 0; +	WREG32(CP_RB_WPTR, rdev->cp.wptr);  	/* set the wb address whether it's enabled or not */  	WREG32(CP_RB_RPTR_ADDR, @@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)  	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));  	rdev->cp.rptr = RREG32(CP_RB_RPTR); -	rdev->cp.wptr = RREG32(CP_RB_WPTR);  	r600_cp_start(rdev);  	rdev->cp.ready = true; @@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,  }  int r600_copy_blit(struct radeon_device *rdev, -		   uint64_t src_offset, uint64_t dst_offset, -		   unsigned num_pages, struct radeon_fence *fence) +		   uint64_t src_offset, +		   uint64_t dst_offset, +		   unsigned num_gpu_pages, +		   struct radeon_fence *fence)  {  	int r;  	mutex_lock(&rdev->r600_blit.mutex);  	rdev->r600_blit.vb_ib = NULL; -	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); +	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	if (r) {  		if (rdev->r600_blit.vb_ib)  			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);  		mutex_unlock(&rdev->r600_blit.mutex);  		return r;  	} -	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); +	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);  	r600_blit_done_copy(rdev, fence);  	mutex_unlock(&rdev->r600_blit.mutex);  	return 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 32807baf55e..c1e056b35b2 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -322,6 +322,7 @@ union radeon_gart_table {  #define RADEON_GPU_PAGE_SIZE 4096  #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) +#define RADEON_GPU_PAGE_SHIFT 12  struct radeon_gart {  	dma_addr_t			table_addr; @@ -914,17 +915,17 @@ struct radeon_asic {  	int (*copy_blit)(struct radeon_device *rdev,  			 uint64_t src_offset,  			 uint64_t dst_offset, -			 unsigned num_pages, +			 unsigned num_gpu_pages,  			 struct radeon_fence *fence);  	int (*copy_dma)(struct radeon_device *rdev,  			uint64_t src_offset,  			uint64_t dst_offset, -			unsigned num_pages, +			unsigned num_gpu_pages,  			struct radeon_fence *fence);  	int (*copy)(struct radeon_device *rdev,  		    uint64_t src_offset,  		    uint64_t dst_offset, -		    unsigned num_pages, +		    unsigned num_gpu_pages,  		    struct radeon_fence *fence);  	uint32_t (*get_engine_clock)(struct radeon_device *rdev);  	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d7a0d7c6a9..3dedaa07aac 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t 
reg);  int r100_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset,  		   uint64_t dst_offset, -		   unsigned num_pages, +		   unsigned num_gpu_pages,  		   struct radeon_fence *fence);  int r100_set_surface_reg(struct radeon_device *rdev, int reg,  			 uint32_t tiling_flags, uint32_t pitch, @@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);  extern int r200_copy_dma(struct radeon_device *rdev,  			 uint64_t src_offset,  			 uint64_t dst_offset, -			 unsigned num_pages, +			 unsigned num_gpu_pages,  			 struct radeon_fence *fence);  void r200_set_safe_registers(struct radeon_device *rdev); @@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);  int r600_ring_test(struct radeon_device *rdev);  int r600_copy_blit(struct radeon_device *rdev,  		   uint64_t src_offset, uint64_t dst_offset, -		   unsigned num_pages, struct radeon_fence *fence); +		   unsigned num_gpu_pages, struct radeon_fence *fence);  void r600_hpd_init(struct radeon_device *rdev);  void r600_hpd_fini(struct radeon_device *rdev);  bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);  void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);  int evergreen_copy_blit(struct radeon_device *rdev,  			uint64_t src_offset, uint64_t dst_offset, -			unsigned num_pages, struct radeon_fence *fence); +			unsigned num_gpu_pages, struct radeon_fence *fence);  void evergreen_hpd_init(struct radeon_device *rdev);  void evergreen_hpd_fini(struct radeon_device *rdev);  bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6cc17fb96a5..6adb3e58aff 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -473,8 +473,8 @@ pflip_cleanup:  	spin_lock_irqsave(&dev->event_lock, flags);  	radeon_crtc->unpin_work = NULL;  unlock_free: -	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);  	spin_unlock_irqrestore(&dev->event_lock, flags); +	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);  	radeon_fence_unref(&work->fence);  	kfree(work); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 9b86fb0e412..0b5468bfaf5 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,  		DRM_ERROR("Trying to move memory with CP turned off.\n");  		return -EINVAL;  	} -	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); + +	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); + +	r = radeon_copy(rdev, old_start, new_start, +			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ +			fence);  	/* FIXME: handle copy error */  	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,  				      evict, no_wait_reserve, no_wait_gpu, new_mem); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a4d38d85909..ef06194c5aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,  	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {  		if (bo->ttm == NULL) { -			ret = ttm_bo_add_ttm(bo, false); +			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); +			ret = 
ttm_bo_add_ttm(bo, zero);  			if (ret)  				goto out_err;  		} diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index a597039d075..72ca689b647 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c @@ -373,6 +373,8 @@ static int wacom_probe(struct hid_device *hdev,  	hidinput = list_entry(hdev->inputs.next, struct hid_input, list);  	input = hidinput->input; +	__set_bit(INPUT_PROP_POINTER, input->propbit); +  	/* Basics */  	input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 59d83e83da7..41125767613 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -601,7 +601,12 @@ static int create_core_data(struct platform_data *pdata,  	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);  	if (!err) {  		tdata->attr_size += MAX_THRESH_ATTRS; -		tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; +		tdata->tmin = tdata->tjmax - +			      ((eax & THERM_MASK_THRESHOLD0) >> +			       THERM_SHIFT_THRESHOLD0) * 1000; +		tdata->ttarget = tdata->tjmax - +				 ((eax & THERM_MASK_THRESHOLD1) >> +				  THERM_SHIFT_THRESHOLD1) * 1000;  	}  	pdata->core_data[attr_no] = tdata; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index a561c3a0e91..397fc59b568 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,  struct pmbus_limit_attr {  	u16 reg;		/* Limit register */  	bool update;		/* True if register needs updates */ +	bool low;		/* True if low limit; for limits with compare +				   functions only */  	const char *attr;	/* Attribute name */  	const char *alarm;	/* Alarm attribute name */  	u32 sbit;		/* Alarm attribute status bit */ @@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,  				if (attr->compare) {  					pmbus_add_boolean_cmp(data, name,  						l->alarm, index, -						cbase, cindex, +						l->low ? cindex : cbase, +						l->low ? 
cbase : cindex,  						attr->sbase + page, l->sbit);  				} else {  					pmbus_add_boolean_reg(data, name, @@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {  static const struct pmbus_limit_attr temp_limit_attrs[] = {  	{  		.reg = PMBUS_UT_WARN_LIMIT, +		.low = true,  		.attr = "min",  		.alarm = "min_alarm",  		.sbit = PB_TEMP_UT_WARNING,  	}, {  		.reg = PMBUS_UT_FAULT_LIMIT, +		.low = true,  		.attr = "lcrit",  		.alarm = "lcrit_alarm",  		.sbit = PB_TEMP_UT_FAULT, @@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {  static const struct pmbus_limit_attr temp_limit_attrs23[] = {  	{  		.reg = PMBUS_UT_WARN_LIMIT, +		.low = true,  		.attr = "min",  		.alarm = "min_alarm",  		.sbit = PB_TEMP_UT_WARNING,  	}, {  		.reg = PMBUS_UT_FAULT_LIMIT, +		.low = true,  		.attr = "lcrit",  		.alarm = "lcrit_alarm",  		.sbit = PB_TEMP_UT_FAULT, diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 7b404e5443e..e34eeb8ae37 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -668,4 +668,3 @@ module_exit(adp5588_exit);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");  MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); -MODULE_ALIAS("platform:adp5588-keys"); diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index b09c7d12721..ab860511f01 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)  				le16_to_cpu(dev->ctl_req->wIndex),  				dev->ctl_data,  				USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); -	if (error && error != EINTR) +	if (error < 0 && error != -EINTR)  		err("%s: usb_control_msg() failed %d", __func__, error);  } diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index da280189ef0..5ec617e28f7 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -67,6 +67,10 @@  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI	0x0245  #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO	0x0246  #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS	0x0247 +/* MacbookAir4,1 (unibody, July 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI	0x0249 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO	0x024a +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS	0x024b  /* MacbookAir4,2 (unibody, July 2011) */  #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI	0x024c  #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO	0x024d @@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = {  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), +	/* MacbookAir4,1 */ +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),  	/* MacbookAir4,2 */  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), @@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {  		{ DIM_X, DIM_X / SN_COORD, -4750, 5280 },  		{ DIM_Y, DIM_Y / SN_COORD, -150, 6730 }  	}, +	{ +		USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, +		USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, +		USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, +		HAS_INTEGRATED_BUTTON, +		0x84, sizeof(struct bt_data), +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 
+		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, +		{ DIM_X, DIM_X / SN_COORD, -4620, 5140 }, +		{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 } +	},  	{}  }; diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index d27c9d91630..958b4eb6369 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi  							get_unaligned_le16(&report[i + 3]);  						i += 4;  					} -				} else if (usage == WCM_DIGITIZER) { -					/* max pressure isn't reported -					features->pressure_max = (unsigned short) -							(report[i+4] << 8  | report[i + 3]); -					*/ -					features->pressure_max = 255; -					i += 4;  				}  				break; @@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi  				pen = 1;  				i++;  				break; - -			case HID_USAGE_UNDEFINED: -				if (usage == WCM_DESKTOP && finger) /* capacity */ -					features->pressure_max = -						get_unaligned_le16(&report[i + 3]); -				i += 4; -				break;  			}  			break; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index c1c2f7b28d8..0dc97ec15c2 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)  	int i;  	for (i = 0; i < 2; i++) { -		int p = data[9 * i + 2]; -		bool touch = p && !wacom->shared->stylus_in_proximity; +		int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); +		bool touch = data[offset + 3] & 0x80; -		input_mt_slot(input, i); -		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);  		/*  		 * Touch events need to be disabled while stylus is  		 * in proximity because user's hand is resting on touchpad  		 * and sending unwanted events.  User expects tablet buttons  		 * to continue working though.  		 
*/ +		touch = touch && !wacom->shared->stylus_in_proximity; + +		input_mt_slot(input, i); +		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);  		if (touch) { -			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; -			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; +			int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; +			int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;  			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {  				x <<= 5;  				y <<= 5;  			} -			input_report_abs(input, ABS_MT_PRESSURE, p);  			input_report_abs(input, ABS_MT_POSITION_X, x);  			input_report_abs(input, ABS_MT_POSITION_Y, y);  		} @@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  			     features->x_fuzz, 0);  	input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,  			     features->y_fuzz, 0); -	input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, -			     features->pressure_fuzz, 0);  	if (features->device_type == BTN_TOOL_PEN) { +		input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, +			     features->pressure_fuzz, 0); +  		/* penabled devices have fixed resolution for each model */  		input_abs_set_res(input_dev, ABS_X, features->x_resolution);  		input_abs_set_res(input_dev, ABS_Y, features->y_resolution); @@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);  		__set_bit(BTN_STYLUS, input_dev->keybit);  		__set_bit(BTN_STYLUS2, input_dev->keybit); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case WACOM_21UX2: @@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		}  		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +  		wacom_setup_cintiq(wacom_wac);  		break; @@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		/* fall through */  	case INTUOS: +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit); +  		wacom_setup_intuos(wacom_wac);  		break; @@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);  		wacom_setup_intuos(wacom_wac); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case TABLETPC2FG: @@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,  	case TABLETPC:  		__clear_bit(ABS_MISC, input_dev->absbit); +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +  		if (features->device_type != BTN_TOOL_PEN)  			break;  /* no need to process stylus stuff */  		/* fall through */  	case PL: -	case PTU:  	case DTU:  		__set_bit(BTN_TOOL_PEN, input_dev->keybit); +		__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);  		__set_bit(BTN_STYLUS, input_dev->keybit);  		__set_bit(BTN_STYLUS2, input_dev->keybit); + +		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit); +		break; + +	case PTU: +		__set_bit(BTN_STYLUS2, input_dev->keybit);  		/* fall through */  	case PENPARTNER: +		__set_bit(BTN_TOOL_PEN, input_dev->keybit);  		__set_bit(BTN_TOOL_RUBBER, input_dev->keybit); +		__set_bit(BTN_STYLUS, input_dev->keybit); + +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);  		break;  	case BAMBOO_PT:  		__clear_bit(ABS_MISC, input_dev->absbit); +		__set_bit(INPUT_PROP_POINTER, input_dev->propbit); +  		if (features->device_type == BTN_TOOL_DOUBLETAP) {  			__set_bit(BTN_LEFT, input_dev->keybit);  			__set_bit(BTN_FORWARD, 
input_dev->keybit); diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index c14412ef464..9941d39df43 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c @@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)  	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);  	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); +	__set_bit(INPUT_PROP_DIRECT, dev->propbit); +  	/* penabled? */  	error = w8001_command(w8001, W8001_CMD_QUERY, true);  	if (!error) { diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 3dc9befa5ae..6dcc7e2d54d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)  		return ret;  	} -	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); +	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);  	if (ret)  		printk(KERN_ERR "IOMMU: can't request irq\n");  	return ret; diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index d87c9d02f78..328c64c0841 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c @@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,  	if (count == size) {  		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); +		led_cdev->blink_delay_on = state;  		ret = count;  	} @@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,  	if (count == size) {  		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); +		led_cdev->blink_delay_off = state;  		ret = count;  	} diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 5d1fca0277e..f83103b8970 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,  	max8997->dev = &i2c->dev;  	max8997->i2c = i2c;  	max8997->type = id->driver_data; +	max8997->irq = i2c->irq;  	if (!pdata)  		goto err; +	max8997->irq_base = pdata->irq_base; +	max8997->ono = pdata->ono;  	max8997->wakeup = pdata->wakeup;  	mutex_init(&max8997->iolock); @@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,  	pm_runtime_set_active(max8997->dev); +	max8997_irq_init(max8997); +  	mfd_add_devices(max8997->dev, -1, max8997_devs,  			ARRAY_SIZE(max8997_devs),  			NULL, 0); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 29601e7d606..86e14583a08 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -17,6 +17,7 @@   * along with this program.  If not, see <http://www.gnu.org/licenses/>.   
*/  #include <linux/kernel.h> +#include <linux/module.h>  #include <linux/types.h>  #include <linux/slab.h>  #include <linux/delay.h> @@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)  				| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF  				| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); -			reg |= (1 << (i + 1));  		} else  			continue; diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c index 2bfad5c86cc..a56be931551 100644 --- a/drivers/mfd/tps65910-irq.c +++ b/drivers/mfd/tps65910-irq.c @@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,  	switch (tps65910_chip_id(tps65910)) {  	case TPS65910:  		tps65910->irq_num = TPS65910_NUM_IRQ; +		break;  	case TPS65911:  		tps65910->irq_num = TPS65911_NUM_IRQ; +		break;  	}  	/* Register with genirq */ diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index b5d598c3aa7..7cbf2aa9e64 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c @@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)  	u8 ch_msb, ch_lsb;  	int ret; -	if (!req) +	if (!req || !twl4030_madc)  		return -EINVAL; +  	mutex_lock(&twl4030_madc->lock);  	if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {  		ret = -EINVAL; @@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)  	if (!madc)  		return -ENOMEM; +	madc->dev = &pdev->dev; +  	/*  	 * Phoenix provides 2 interrupt lines. The first one is connected to  	 * the OMAP. The other one can be connected to the other processor such diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c index ebf99bef392..d584f6b4d6e 100644 --- a/drivers/mfd/wm8350-gpio.c +++ b/drivers/mfd/wm8350-gpio.c @@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)  	return ret;  } -static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) +static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)  {  	if (db == WM8350_GPIO_DEBOUNCE_ON)  		return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, @@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,  		goto err;  	if (gpio_set_polarity(wm8350, gpio, pol))  		goto err; -	if (gpio_set_debounce(wm8350, gpio, debounce)) +	if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))  		goto err;  	if (gpio_set_dir(wm8350, gpio, dir))  		goto err; diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 06df1877ad0..0b56e3f4357 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -165,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,  static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,  					     const char *thread_name)  { +	/* +	 * Since we access the comm member in current's task_struct, we only +	 * need to be as large as what 'comm' in that structure is. +	 */ +	char comm[TASK_COMM_LEN];  	struct pti_masterchannel mccontrol = {.master = CONTROL_ID,  					      .channel = 0};  	const char *thread_name_p; @@ -172,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,  	u8 control_frame[CONTROL_FRAME_LEN];  	if (!thread_name) { -		/* -		 * Since we access the comm member in current's task_struct, -		 * we only need to be as large as what 'comm' in that -		 * structure is. 
-		 */ -		char comm[TASK_COMM_LEN]; -  		if (!in_interrupt())  			get_task_comm(comm, current);  		else diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 1ff5486213f..4c1a648d00f 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,  	/*  	 * Reliable writes are used to implement Forced Unit Access and  	 * REQ_META accesses, and are supported only on MMCs. +	 * +	 * XXX: this really needs a good explanation of why REQ_META +	 * is treated special.  	 */  	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||  			  (req->cmd_flags & REQ_META)) && diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8d0314dbd94..a44874e24f2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2535,7 +2535,7 @@ config S6GMAC  source "drivers/net/stmmac/Kconfig"  config PCH_GBE -	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" +	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"  	depends on PCI  	select MII  	---help--- @@ -2548,10 +2548,11 @@ config PCH_GBE  	  This driver enables Gigabit Ethernet function.  	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ -	  Output Hub), ML7223. -	  ML7223 IOH is for MP(Media Phone) use. -	  ML7223 is companion chip for Intel Atom E6xx series. -	  ML7223 is completely compatible for Intel EG20T PCH. +	  Output Hub), ML7223/ML7831. +	  ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general +	  purpose use. +	  ML7223/ML7831 is companion chip for Intel Atom E6xx series. +	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.  config FTGMAC100  	tristate "Faraday FTGMAC100 Gigabit Ethernet support" diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index c423504a755..e46df5331c5 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -315,6 +315,14 @@ union db_prod {  	u32		raw;  }; +/* dropless fc FW/HW related params */ +#define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512) +#define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \ +					ETH_MAX_AGGREGATION_QUEUES_E1 :\ +					ETH_MAX_AGGREGATION_QUEUES_E1H_E2) +#define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) +#define FW_PREFETCH_CNT		16 +#define DROPLESS_FC_HEADROOM	100  /* MC hsi */  #define BCM_PAGE_SHIFT		12 @@ -331,15 +339,35 @@ union db_prod {  /* SGE ring related macros */  #define NUM_RX_SGE_PAGES	2  #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) -#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2) +#define NEXT_PAGE_SGE_DESC_CNT	2 +#define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)  /* RX_SGE_CNT is promised to be a power of 2 */  #define RX_SGE_MASK		(RX_SGE_CNT - 1)  #define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)  #define MAX_RX_SGE		(NUM_RX_SGE - 1)  #define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \ -				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) +				  (MAX_RX_SGE_CNT - 1)) ? \ +					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ +					(x) + 1)  #define RX_SGE(x)		((x) & MAX_RX_SGE) +/* + * Number of required  SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + *    these aggregations will probably consume SGE immidiatelly) + * 2. 
Rest of BRB blocks divided by 2 (block will consume new SGE only + *    after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ +#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \ +					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) +#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ +						MAX_RX_SGE_CNT) +#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \ +				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) +#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) +  /* Manipulate a bit vector defined as an array of u64 */  /* Number of bits in one sge_mask array element */ @@ -551,24 +579,43 @@ struct bnx2x_fastpath {  #define NUM_TX_RINGS		16  #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) -#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1) +#define NEXT_PAGE_TX_DESC_CNT	1 +#define MAX_TX_DESC_CNT		(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)  #define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)  #define MAX_TX_BD		(NUM_TX_BD - 1)  #define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)  #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \ -				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) +				  (MAX_TX_DESC_CNT - 1)) ? \ +					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ +					(x) + 1)  #define TX_BD(x)		((x) & MAX_TX_BD)  #define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)  /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */  #define NUM_RX_RINGS		8  #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) -#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2) +#define NEXT_PAGE_RX_DESC_CNT	2 +#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)  #define RX_DESC_MASK		(RX_DESC_CNT - 1)  #define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)  #define MAX_RX_BD		(NUM_RX_BD - 1)  #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) -#define MIN_RX_AVAIL		128 + +/* dropless fc calculations for BDs + * + * Number of BDs should as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ		BRB_SIZE(bp) +#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ +					      MAX_RX_DESC_CNT) +#define BD_TH_LO(bp)		(NUM_BD_REQ + \ +				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ +				 FW_DROP_LEVEL(bp)) +#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +#define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)  #define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \  					ETH_MIN_RX_CQES_WITH_TPA_E1 : \ @@ -579,7 +626,9 @@ struct bnx2x_fastpath {  								MIN_RX_AVAIL))  #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \ -				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) +				  (MAX_RX_DESC_CNT - 1)) ? \ +					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ +					(x) + 1)  #define RX_BD(x)		((x) & MAX_RX_BD)  /* @@ -589,14 +638,31 @@ struct bnx2x_fastpath {  #define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))  #define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)  #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) -#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1) +#define NEXT_PAGE_RCQ_DESC_CNT	1 +#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)  #define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)  #define MAX_RCQ_BD		(NUM_RCQ_BD - 1)  #define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)  #define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \ -				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) +				  (MAX_RCQ_DESC_CNT - 1)) ? 
\ +					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ +					(x) + 1)  #define RCQ_BD(x)		((x) & MAX_RCQ_BD) +/* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ		BRB_SIZE(bp) +#define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ +					      MAX_RCQ_DESC_CNT) +#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \ +				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ +				 FW_DROP_LEVEL(bp)) +#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) +  /* This is needed for determining of last_max */  #define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b)) @@ -685,24 +751,17 @@ struct bnx2x_fastpath {  #define FP_CSB_FUNC_OFF	\  			offsetof(struct cstorm_status_block_c, func) -#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */ -					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */ -#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */ -					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */ -#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */ -					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */ +#define HC_INDEX_ETH_RX_CQ_CONS		1 -#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */ -					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index   */ -					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */ +#define HC_INDEX_OOO_TX_CQ_CONS		4 -#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0 +#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 +#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0  #define BNX2X_RX_SB_INDEX \  	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) @@ -1100,11 +1159,12 @@ struct bnx2x {  #define BP_PORT(bp)			(bp->pfid & 1)  #define BP_FUNC(bp)			(bp->pfid)  #define BP_ABS_FUNC(bp)			(bp->pf_num) -#define BP_E1HVN(bp)			(bp->pfid >> 1) -#define BP_VN(bp)			(BP_E1HVN(bp)) /*remove when approved*/ -#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2) -#define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\ -	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1)) +#define BP_VN(bp)			((bp)->pfid >> 1) +#define BP_MAX_VN_NUM(bp)		(CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) +#define BP_L_ID(bp)			(BP_VN(bp) << 2) +#define BP_FW_MB_IDX_VN(bp, vn)		(BP_PORT(bp) +\ +	  (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 
2  : 1)) +#define BP_FW_MB_IDX(bp)		BP_FW_MB_IDX_VN(bp, BP_VN(bp))  	struct net_device	*dev;  	struct pci_dev		*pdev; @@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,  #define MAX_DMAE_C_PER_PORT		8  #define INIT_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ -					 BP_E1HVN(bp)) +					 BP_VN(bp))  #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \  					 E1HVN_MAX) @@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,  /* must be used on a CID before placing it on a HW ring */  #define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \ -					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ +					 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \  					 (x))  #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe)) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 37e5790681a..c4cbf973641 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)  void bnx2x_init_rx_rings(struct bnx2x *bp)  {  	int func = BP_FUNC(bp); -	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : -					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;  	u16 ring_prod;  	int i, j; @@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)  		if (!fp->disable_tpa) {  			/* Fill the per-aggregtion pool */ -			for (i = 0; i < max_agg_queues; i++) { +			for (i = 0; i < MAX_AGG_QS(bp); i++) {  				struct bnx2x_agg_info *tpa_info =  					&fp->tpa_info[i];  				struct sw_rx_bd *first_buf = @@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)  					bnx2x_free_rx_sge_range(bp, fp,  								ring_prod);  					bnx2x_free_tpa_pool(bp, fp, -							    max_agg_queues); +							    MAX_AGG_QS(bp));  					fp->disable_tpa = 1;  					ring_prod = 0;  					break; @@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)  		bnx2x_free_rx_bds(fp);  		if (!fp->disable_tpa) -			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? -					    ETH_MAX_AGGREGATION_QUEUES_E1 : -					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2); +			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));  	}  } @@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)  	struct bnx2x_fastpath *fp = &bp->fp[index];  	int ring_size = 0;  	u8 cos; +	int rx_ring_size = 0;  	/* if rx_ring_size specified - use it */ -	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : -			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); +	if (!bp->rx_ring_size) { -	/* allocate at least number of buffers required by FW */ -	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : -						    MIN_RX_SIZE_TPA, -				  rx_ring_size); +		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + +		/* allocate at least number of buffers required by FW */ +		rx_ring_size = max_t(int, bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : +				     MIN_RX_SIZE_TPA, rx_ring_size); + +		bp->rx_ring_size = rx_ring_size; +	} else +		rx_ring_size = bp->rx_ring_size;  	/* Common */  	sb = &bnx2x_fp(bp, index, status_blk); diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 221863059da..cf3e47914dd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  		}  		/* advertise the requested speed and duplex if supported */ -		cmd->advertising &= bp->port.supported[cfg_idx]; +		if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { +			DP(NETIF_MSG_LINK, "Advertisement parameters " +					   "are not supported\n"); +			return -EINVAL; +		}  		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; -		bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; -		bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | +		bp->link_params.req_duplex[cfg_idx] = cmd->duplex; +		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |  					 cmd->advertising); +		if (cmd->advertising) { + +			bp->link_params.speed_cap_mask[cfg_idx] = 0; +			if (cmd->advertising & ADVERTISED_10baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; +			} +			if (cmd->advertising & ADVERTISED_10baseT_Full) +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; + +			if (cmd->advertising & ADVERTISED_100baseT_Full) +				bp->link_params.speed_cap_mask[cfg_idx] |= +				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; +			if (cmd->advertising & ADVERTISED_100baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +				     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; +			} +			if (cmd->advertising & ADVERTISED_1000baseT_Half) { +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; +			} +			if (cmd->advertising & (ADVERTISED_1000baseT_Full | +						ADVERTISED_1000baseKX_Full)) +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + +			if (cmd->advertising & (ADVERTISED_10000baseT_Full | +						ADVERTISED_10000baseKX4_Full | +						ADVERTISED_10000baseKR_Full)) +				bp->link_params.speed_cap_mask[cfg_idx] |= +					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; +		}  	} else { /* forced speed */  		/* advertise the requested speed and duplex if supported */  		switch (speed) { @@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,  	if (bp->rx_ring_size)  		ering->rx_pending = bp->rx_ring_size;  	else -		if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) -			ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; -		else -			ering->rx_pending = MAX_RX_AVAIL; +		ering->rx_pending = MAX_RX_AVAIL;  	ering->rx_mini_pending = 0;  	ering->rx_jumbo_pending = 0; diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index d45b1555a60..ba15bdc5a1a 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,  {  	u32 nig_reg_adress_crd_weight = 0;  	u32 pbf_reg_adress_crd_weight = 0; -	/* Calculate and set BW for this COS*/ -	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; -	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; +	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */ +	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; +	const u32 cos_bw_pbf = ((bw ? 
bw : 1) * min_w_val_pbf) / total_bw;  	switch (cos_entry) {  	case 0: @@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(  	/* Calculate total BW requested */  	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {  		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { - -			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { -				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" -						   "was set to 0\n"); -			return -EINVAL; +			*total_bw += +				ets_params->cos[cos_idx].params.bw_params.bw;  		} -		*total_bw += -		    ets_params->cos[cos_idx].params.bw_params.bw; -	    }  	} -	/*Check taotl BW is valid */ +	/* Check total BW is valid */  	if ((100 != *total_bw) || (0 == *total_bw)) {  		if (0 == *total_bw) {  			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" @@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,  	/* Check loopback mode */  	if (lb) -		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; +		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;  	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);  	bnx2x_set_xumac_nig(params,  			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); @@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,  	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,  			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); +	/* Advertised and set FEC (Forward Error Correction) */ +	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, +			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, +			 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | +			  MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); +  	/* Enable CL37 BAM */  	if (REG_RD(bp, params->shmem_base +  		   offsetof(struct shmem_region, dev_info. @@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,  					(tmp | EMAC_LED_OVERRIDE));  				/*  				 * return here without enabling traffic -				 * LED blink andsetting rate in ON mode. +				 * LED blink and setting rate in ON mode.  				 * In oper mode, enabling LED blink  				 * and setting rate is needed.  				 
*/ @@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,  			 * This is a work-around for HW issue found when link  			 * is up in CL73  			 */ -			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); +			if ((!CHIP_IS_E3(bp)) || +			    (CHIP_IS_E3(bp) && +			     mode == LED_MODE_ON)) +				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); +  			if (CHIP_IS_E1x(bp) ||  			    CHIP_IS_E2(bp) ||  			    (mode == LED_MODE_ON)) @@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_HW_LOCK_REQUIRED | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_HW_LOCK_REQUIRED,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_INIT_XGXS_FIRST | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_INIT_XGXS_FIRST,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {  	.addr		= 0xff,  	.def_md_devad	= 0,  	.flags		= (FLAGS_HW_LOCK_REQUIRED | -			   FLAGS_INIT_XGXS_FIRST | -			   FLAGS_TX_ERROR_CHECK), +			   FLAGS_INIT_XGXS_FIRST),  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, @@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,  	.addr		= 0xff,  	.def_md_devad	= 0, -	.flags		= (FLAGS_FAN_FAILURE_DET_REQ | -			   FLAGS_TX_ERROR_CHECK), +	.flags		= FLAGS_FAN_FAILURE_DET_REQ,  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},  	.mdio_ctrl	= 0, diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f74582a22c6..c027e9341a1 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,  	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);  	opcode |= (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); -	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | -		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); +	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | +		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));  	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);  #ifdef __BIG_ENDIAN @@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)  	if (!CHIP_IS_E1(bp)) {  		/* init leading/trailing edge */  		if (IS_MF(bp)) { -			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); +			val = (0xee0f | (1 << (BP_VN(bp) + 4)));  			if (bp->port.pmf)  				/* enable nig and gpio3 attention */  				val |= 0x1100; @@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)  	/* init leading/trailing edge */  	if (IS_MF(bp)) { -		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); +		val = (0xee0f | (1 << (BP_VN(bp) + 4)));  		if (bp->port.pmf)  			/* enable nig and gpio3 attention */  			val |= 0x1100; @@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)  	int vn;  	bp->vn_weight_sum = 0; -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {  		u32 vn_cfg = bp->mf_config[vn];  		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>  				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100; @@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)  					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;  } +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ +	return 2 * vn + BP_PORT(bp); +} +  static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)  {  	struct rate_shaping_vars_per_vn m_rs_vn;  	struct fairness_vars_per_vn m_fair_vn;  	u32 vn_cfg = bp->mf_config[vn]; -	int func = 2*vn + BP_PORT(bp); +	int func = func_by_vn(bp, vn);  	u16 vn_min_rate, vn_max_rate;  	int i; @@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)  	 *  	 *      and there are 2 functions per port  	 */ -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {  		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);  		if (func >= E1H_FUNC_MAX) @@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)  		/* calculate and set min-max rate for each vn */  		if (bp->port.pmf) -			for (vn = VN_0; vn < E1HVN_MAX; vn++) +			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)  				bnx2x_init_vn_minmax(bp, vn);  		/* always enable rate shaping and fairness */ @@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)  static inline void bnx2x_link_sync_notify(struct bnx2x *bp)  { -	int port = BP_PORT(bp);  	int func;  	int vn;  	/* Set the attention towards other drivers on the same port */ -	for (vn = VN_0; vn < E1HVN_MAX; vn++) { -		if (vn == BP_E1HVN(bp)) +	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { +		if (vn == BP_VN(bp))  			continue; -		func = ((vn << 1) | port); +		func = func_by_vn(bp, vn);  		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +  		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);  	} @@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)  	bnx2x_dcbx_pmf_update(bp);  	/* enable nig attention */ -	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); +	val = (0xff0f | (1 << (BP_VN(bp) + 4)));  	if (bp->common.int_block == INT_BLOCK_HC) {  		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);  		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); @@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	u16 tpa_agg_size = 
0;  	if (!fp->disable_tpa) { -		pause->sge_th_hi = 250; -		pause->sge_th_lo = 150; +		pause->sge_th_lo = SGE_TH_LO(bp); +		pause->sge_th_hi = SGE_TH_HI(bp); + +		/* validate SGE ring has enough to cross high threshold */ +		WARN_ON(bp->dropless_fc && +				pause->sge_th_hi + FW_PREFETCH_CNT > +				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); +  		tpa_agg_size = min_t(u32,  			(min_t(u32, 8, MAX_SKB_FRAGS) *  			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); @@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	/* pause - not for e1 */  	if (!CHIP_IS_E1(bp)) { -		pause->bd_th_hi = 350; -		pause->bd_th_lo = 250; -		pause->rcq_th_hi = 350; -		pause->rcq_th_lo = 250; +		pause->bd_th_lo = BD_TH_LO(bp); +		pause->bd_th_hi = BD_TH_HI(bp); + +		pause->rcq_th_lo = RCQ_TH_LO(bp); +		pause->rcq_th_hi = RCQ_TH_HI(bp); +		/* +		 * validate that rings have enough entries to cross +		 * high thresholds +		 */ +		WARN_ON(bp->dropless_fc && +				pause->bd_th_hi + FW_PREFETCH_CNT > +				bp->rx_ring_size); +		WARN_ON(bp->dropless_fc && +				pause->rcq_th_hi + FW_PREFETCH_CNT > +				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);  		pause->pri_map = 1;  	} @@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,  	 * For PF Clients it should be the maximum avaliable number.  	 * VF driver(s) may want to define it to a smaller value.  	 */ -	rxq_init->max_tpa_queues = -		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : -		ETH_MAX_AGGREGATION_QUEUES_E1H_E2); +	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);  	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;  	rxq_init->fw_sb_id = fp->fw_sb_id; @@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,  	hc_sm->time_to_expire = 0xFFFFFFFF;  } + +/* allocates state machine ids. */ +static inline +void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ +	/* zero out state machine indices */ +	/* rx indices */ +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + +	/* tx indices */ +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + +	/* map indices */ +	/* rx indices */ +	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= +		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + +	/* tx indices */ +	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= +		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +} +  static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  			  u8 vf_valid, int fw_sb_id, int igu_sb_id)  { @@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  		hc_sm_p = sb_data_e2.common.state_machine;  		sb_data_p = (u32 *)&sb_data_e2;  		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); +		bnx2x_map_sb_state_machines(sb_data_e2.index_data);  	} else {  		memset(&sb_data_e1x, 0,  		       sizeof(struct hc_status_block_data_e1x)); @@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  		hc_sm_p = sb_data_e1x.common.state_machine;  		sb_data_p = (u32 *)&sb_data_e1x;  		data_size = sizeof(struct 
hc_status_block_data_e1x)/sizeof(u32); +		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);  	}  	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], @@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)  	 * take the UNDI lock to protect undi_unload flow from accessing  	 * registers while we're resetting the chip  	 */ -	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	bnx2x_reset_common(bp);  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); @@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)  	}  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); -	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); @@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)  			if (CHIP_MODE_IS_4_PORT(bp))  				dsb_idx = BP_FUNC(bp);  			else -				dsb_idx = BP_E1HVN(bp); +				dsb_idx = BP_VN(bp);  			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?  				       IGU_BC_BASE_DSB_PROD + dsb_idx :  				       IGU_NORM_BASE_DSB_PROD + dsb_idx); +			/* +			 * igu prods come in chunks of E1HVN_MAX (4) - +			 * does not matters what is the current chip mode +			 */  			for (i = 0; i < (num_segs * E1HVN_MAX);  			     i += E1HVN_MAX) {  				addr = IGU_REG_PROD_CONS_MEMORY + @@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)  		u32 val;  		/* The mac address is written to entries 1-4 to  		   preserve entry 0 which is used by the PMF */ -		u8 entry = (BP_E1HVN(bp) + 1)*8; +		u8 entry = (BP_VN(bp) + 1)*8;  		val = (mac_addr[0] << 8) | mac_addr[1];  		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); @@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  	/* Check if there is any driver already loaded */  	val = REG_RD(bp, MISC_REG_UNPREPARED);  	if (val == 0x1) { -		/* Check if it is the UNDI driver + +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); +		/* +		 * Check if it is the UNDI driver  		 * UNDI driver initializes CID offset for normal bell to 0x7  		 */ -		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);  		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);  		if (val == 0x7) {  			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; @@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  				bnx2x_fw_command(bp, reset_code, 0);  			} -			/* now it's safe to release the lock */ -			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); -  			bnx2x_undi_int_disable(bp);  			port = BP_PORT(bp); @@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)  			bp->fw_seq =  			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &  				DRV_MSG_SEQ_NUMBER_MASK); -		} else -			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); +		} + +		/* now it's safe to release the lock */ +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	}  } @@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)  static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)  {  	int pfid = BP_FUNC(bp); -	int vn = BP_E1HVN(bp);  	int igu_sb_id;  	u32 val;  	u8 fid, igu_sb_cnt = 0;  	bp->igu_base_sb = 0xff;  	if (CHIP_INT_MODE_IS_BC(bp)) { +		int vn = BP_VN(bp);  		igu_sb_cnt = bp->igu_sb_cnt;  		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn) *  			FP_SB_MAX_E1x; @@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  		bp->igu_base_sb = 0;  	} else {  		bp->common.int_block = INT_BLOCK_IGU; + +		/* do not allow device reset during IGU info preocessing */ +		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); +  		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);  		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { @@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  		bnx2x_get_igu_cam_info(bp); +		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);  	}  	/* @@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  	bp->mf_ov = 0;  	bp->mf_mode = 0; -	vn = BP_E1HVN(bp); +	vn = BP_VN(bp);  	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {  		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -9593,13 +9656,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)  	/* port info */  	bnx2x_get_port_hwinfo(bp); -	if (!BP_NOMCP(bp)) { -		bp->fw_seq = -			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & -			 DRV_MSG_SEQ_NUMBER_MASK); -		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); -	} -  	/* Get MAC addresses */  	bnx2x_get_mac_hwinfo(bp); @@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)  	if (!BP_NOMCP(bp))  		bnx2x_undi_unload(bp); +	/* init fw_seq after undi_unload! */ +	if (!BP_NOMCP(bp)) { +		bp->fw_seq = +			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & +			 DRV_MSG_SEQ_NUMBER_MASK); +		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); +	} +  	if (CHIP_REV_IS_FPGA(bp))  		dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,  	/* clean indirect addresses */  	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,  			       PCICFG_VENDOR_ID_OFFSET); -	/* Clean the following indirect addresses for all functions since it +	/* +	 * Clean the following indirect addresses for all functions since it  	 * is not used by the driver.  	 */  	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);  	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); -	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + +	if (CHIP_IS_E1x(bp)) { +		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); +		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); +	}  	/*  	 * Enable internal target-read (in case we are probed after PF FLR). 
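The bnx2x_main.c hunks above replace the hard-coded pause thresholds with per-chip values (SGE/BD/RCQ high and low water marks) and add WARN_ON checks that each ring is actually large enough to cross its high threshold once the firmware prefetch window (FW_PREFETCH_CNT) is accounted for. The standalone C sketch below only illustrates that sizing rule; the numeric values and the helper name ring_threshold_ok() are invented for the example and are not taken from the driver.

#include <stdio.h>

/* Assumed firmware prefetch depth for the demo; the driver uses its own
 * FW_PREFETCH_CNT constant. */
#define FW_PREFETCH_CNT 16

/* A ring can only honour dropless flow control if its pause high threshold
 * can be reached while the firmware still has FW_PREFETCH_CNT descriptors
 * prefetched ahead of the consumer. */
static int ring_threshold_ok(unsigned int ring_entries, unsigned int th_hi)
{
	return th_hi + FW_PREFETCH_CNT <= ring_entries;
}

int main(void)
{
	unsigned int rx_ring_size = 4096;	/* hypothetical ring size */
	unsigned int bd_th_hi = 500;		/* hypothetical high threshold */

	if (!ring_threshold_ok(rx_ring_size, bd_th_hi))
		fprintf(stderr, "dropless FC misconfigured: ring too small\n");
	else
		printf("%u-entry ring can cross threshold %u\n",
		       rx_ring_size, bd_th_hi);
	return 0;
}

In the driver the same comparison is wrapped in WARN_ON() and gated on bp->dropless_fc, so a misconfigured ring is reported when the RX queue is prepared rather than showing up later as silently dropped frames.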
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 40266c14e6d..750e8445dac 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -5320,7 +5320,7 @@  #define XCM_REG_XX_OVFL_EVNT_ID 				 0x20058  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1) -#define XMAC_CTRL_REG_CORE_LOCAL_LPBK				 (0x1<<3) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK				 (0x1<<2)  #define XMAC_CTRL_REG_RX_EN					 (0x1<<1)  #define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)  #define XMAC_CTRL_REG_TX_EN					 (0x1<<0) @@ -5766,7 +5766,7 @@  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9  #define HW_LOCK_RESOURCE_SPIO					 2 -#define HW_LOCK_RESOURCE_UNDI					 5 +#define HW_LOCK_RESOURCE_RESET					 5  #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)  #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)  #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18) @@ -6853,6 +6853,9 @@ Theotherbitsarereservedandshouldbezero*/  #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000  #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96  #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000  #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 771f6803b23..9908f2bbcf7 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)  		break;  	case MAC_TYPE_NONE: /* unreached */ -		BNX2X_ERR("stats updated by DMAE but no MAC active\n"); +		DP(BNX2X_MSG_STATS, +		   "stats updated by DMAE but no MAC active\n");  		return -1;  	default: /* unreached */ @@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)  static void bnx2x_func_stats_base_init(struct bnx2x *bp)  { -	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; +	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;  	u32 func_stx;  	/* sanity */ @@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)  	func_stx = bp->func_stx;  	for (vn = VN_0; vn < vn_max; vn++) { -		int mb_idx = CHIP_IS_E1x(bp) ? 
2*vn + BP_PORT(bp) : vn; +		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);  		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);  		bnx2x_func_stats_init(bp); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index a81249246ec..2adc294f512 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -46,6 +46,7 @@  #include <linux/skbuff.h>  #include <linux/platform_device.h>  #include <linux/clk.h> +#include <linux/io.h>  #include <linux/can/dev.h>  #include <linux/can/error.h> diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8545c7aa93e..a5a89ecb6f3 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)  		checksum += eeprom_data;  	} +#ifdef CONFIG_PARISC +	/* This is a signature and not a checksum on HP c8000 */ +	if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) +		return E1000_SUCCESS; + +#endif  	if (checksum == (u16) EEPROM_SUM)  		return E1000_SUCCESS;  	else { diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 25a8c2adb00..0caf3c323ec 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,  	u32 i = 0;  	list_for_each_entry(comp, &priv->rx_list.list, list) { -		if (i <= cmd->rule_cnt) { -			rule_locs[i] = comp->fs.location; -			i++; -		} +		if (i == cmd->rule_cnt) +			return -EMSGSIZE; +		rule_locs[i] = comp->fs.location; +		i++;  	}  	cmd->data = MAX_FILER_IDX; diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 16ce45c1193..52a39000c42 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)  	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);  	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); +	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;  	/* Wrap around descriptor ring */  	if (greth->tx_next == GRETH_TXBD_NUM_MASK) { @@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)  	if (nr_frags != 0)  		status = GRETH_TXBD_MORE; -	status |= GRETH_TXBD_CSALL; +	if (skb->ip_summed == CHECKSUM_PARTIAL) +		status |= GRETH_TXBD_CSALL;  	status |= skb_headlen(skb) & GRETH_BD_LEN;  	if (greth->tx_next == GRETH_TXBD_NUM_MASK)  		status |= GRETH_BD_WR; @@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)  		greth->tx_skbuff[curr_tx] = NULL;  		bdp = greth->tx_bd_base + curr_tx; -		status = GRETH_TXBD_CSALL | GRETH_BD_EN; +		status = GRETH_BD_EN; +		if (skb->ip_summed == CHECKSUM_PARTIAL) +			status |= GRETH_TXBD_CSALL;  		status |= frag->size & GRETH_BD_LEN;  		/* Wrap around descriptor ring */ @@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)  				dev->stats.tx_fifo_errors++;  		}  		dev->stats.tx_packets++; +		dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];  		greth->tx_last = NEXT_TX(greth->tx_last);  		greth->tx_free++;  	} @@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)  		greth->tx_skbuff[greth->tx_last] = NULL;  		greth_update_tx_stats(dev, stat); +		dev->stats.tx_bytes += skb->len;  		bdp = greth->tx_bd_base + greth->tx_last; @@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)  				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);  				skb->protocol = 
eth_type_trans(skb, dev); +				dev->stats.rx_bytes += pkt_len;  				dev->stats.rx_packets++;  				netif_receive_skb(skb);  			} @@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)  				skb->protocol = eth_type_trans(skb, dev);  				dev->stats.rx_packets++; +				dev->stats.rx_bytes += pkt_len;  				netif_receive_skb(skb);  				greth->rx_skbuff[greth->rx_cur] = newskb; diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 9a0040dee4d..232a622a85b 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h @@ -103,6 +103,7 @@ struct greth_private {  	unsigned char *tx_bufs[GRETH_TXBD_NUM];  	unsigned char *rx_bufs[GRETH_RXBD_NUM]; +	u16 tx_bufs_length[GRETH_TXBD_NUM];  	u16 tx_next;  	u16 tx_last; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 3e667926940..8dd5fccef72 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	struct ibmveth_adapter *adapter = netdev_priv(dev);  	unsigned long set_attr, clr_attr, ret_attr;  	unsigned long set_attr6, clr_attr6; -	long ret, ret6; +	long ret, ret4, ret6;  	int rc1 = 0, rc2 = 0;  	int restart = 0; @@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	set_attr = 0;  	clr_attr = 0; +	set_attr6 = 0; +	clr_attr6 = 0;  	if (data) {  		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; @@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&  	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&  	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { -		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, +		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,  					 set_attr, &ret_attr); -		if (ret != H_SUCCESS) { +		if (ret4 != H_SUCCESS) {  			netdev_err(dev, "unable to change IPv4 checksum "  					"offload settings. %d rc=%ld\n", -					data, ret); +					data, ret4); + +			h_illan_attributes(adapter->vdev->unit_address, +					   set_attr, clr_attr, &ret_attr); + +			if (data == 1) +				dev->features &= ~NETIF_F_IP_CSUM; -			ret = h_illan_attributes(adapter->vdev->unit_address, -						 set_attr, clr_attr, &ret_attr);  		} else {  			adapter->fw_ipv4_csum_support = data;  		} @@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)  		if (ret6 != H_SUCCESS) {  			netdev_err(dev, "unable to change IPv6 checksum "  					"offload settings. 
%d rc=%ld\n", -					data, ret); +					data, ret6); + +			h_illan_attributes(adapter->vdev->unit_address, +					   set_attr6, clr_attr6, &ret_attr); + +			if (data == 1) +				dev->features &= ~NETIF_F_IPV6_CSUM; -			ret = h_illan_attributes(adapter->vdev->unit_address, -						 set_attr6, clr_attr6, -						 &ret_attr);  		} else  			adapter->fw_ipv6_csum_support = data; -		if (ret != H_SUCCESS || ret6 != H_SUCCESS) +		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)  			adapter->rx_csum = data;  		else  			rc1 = -EIO; @@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,  	union ibmveth_buf_desc descs[6];  	int last, i;  	int force_bounce = 0; +	dma_addr_t dma_addr;  	/*  	 * veth handles a maximum of 6 segments including the header, so @@ -994,17 +1004,16 @@ retry_bounce:  	}  	/* Map the header */ -	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, -						 skb_headlen(skb), -						 DMA_TO_DEVICE); -	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) +	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, +				  skb_headlen(skb), DMA_TO_DEVICE); +	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))  		goto map_failed;  	descs[0].fields.flags_len = desc_flags | skb_headlen(skb); +	descs[0].fields.address = dma_addr;  	/* Map the frags */  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -		unsigned long dma_addr;  		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, @@ -1026,7 +1035,12 @@ retry_bounce:  		netdev->stats.tx_bytes += skb->len;  	} -	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) +	dma_unmap_single(&adapter->vdev->dev, +			 descs[0].fields.address, +			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, +			 DMA_TO_DEVICE); + +	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)  		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,  			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,  			       DMA_TO_DEVICE); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 22790394318..e1fcc958927 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  		if (ring_is_rsc_enabled(rx_ring))  			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); -		/* if this is a skb from previous receive DMA will be 0 */ -		if (rx_buffer_info->dma) { +		/* linear means we are building an skb from multiple pages */ +		if (!skb_is_nonlinear(skb)) {  			u16 hlen;  			if (pkt_is_rsc &&  			    !(staterr & IXGBE_RXD_STAT_EOP) && diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index dfc82720065..ed2a3977c6e 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)  	}  } -module_init(init_netconsole); +/* + * Use late_initcall to ensure netconsole is + * initialized after network device driver if built-in. + * + * late_initcall() and module_init() are identical if built as module. 
+ */ +late_initcall(init_netconsole);  module_exit(cleanup_netconsole); diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index 59fac77d0db..a09a07197eb 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -127,8 +127,8 @@ struct pch_gbe_regs {  /* Reset */  #define PCH_GBE_ALL_RST         0x80000000  /* All reset */ -#define PCH_GBE_TX_RST          0x40000000  /* TX MAC, TX FIFO, TX DMA reset */ -#define PCH_GBE_RX_RST          0x04000000  /* RX MAC, RX FIFO, RX DMA reset */ +#define PCH_GBE_TX_RST          0x00008000  /* TX MAC, TX FIFO, TX DMA reset */ +#define PCH_GBE_RX_RST          0x00004000  /* RX MAC, RX FIFO, RX DMA reset */  /* TCP/IP Accelerator Control */  #define PCH_GBE_EX_LIST_EN      0x00000008  /* External List Enable */ @@ -276,6 +276,9 @@ struct pch_gbe_regs {  #define PCH_GBE_RX_DMA_EN       0x00000002   /* Enables Receive DMA */  #define PCH_GBE_TX_DMA_EN       0x00000001   /* Enables Transmission DMA */ +/* RX DMA STATUS */ +#define PCH_GBE_IDLE_CHECK       0xFFFFFFFE +  /* Wake On LAN Status */  #define PCH_GBE_WLS_BR          0x00000008 /* Broadcas Address */  #define PCH_GBE_WLS_MLT         0x00000004 /* Multicast Address */ @@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {  struct pch_gbe_buffer {  	struct sk_buff *skb;  	dma_addr_t dma; +	unsigned char *rx_buffer;  	unsigned long time_stamp;  	u16 length;  	bool mapped; @@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {  struct pch_gbe_rx_ring {  	struct pch_gbe_rx_desc *desc;  	dma_addr_t dma; +	unsigned char *rx_buff_pool; +	dma_addr_t rx_buff_pool_logic; +	unsigned int rx_buff_pool_size;  	unsigned int size;  	unsigned int count;  	unsigned int next_to_use; @@ -622,6 +629,7 @@ struct pch_gbe_adapter {  	unsigned long rx_buffer_len;  	unsigned long tx_queue_len;  	bool have_msi; +	bool rx_stop_flag;  };  extern const char pch_driver_version[]; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index eac3c5ca973..567ff10889b 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -20,7 +20,6 @@  #include "pch_gbe.h"  #include "pch_gbe_api.h" -#include <linux/prefetch.h>  #define DRV_VERSION     "1.00"  const char pch_driver_version[] = DRV_VERSION; @@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;  #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */  #define PCH_GBE_COPYBREAK_DEFAULT	256  #define PCH_GBE_PCI_BAR			1 +#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */  /* Macros for ML7223 */  #define PCI_VENDOR_ID_ROHM			0x10db  #define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013 +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_GBE		0x8802 +  #define PCH_GBE_TX_WEIGHT         64  #define PCH_GBE_RX_WEIGHT         64  #define PCH_GBE_RX_BUFFER_WRITE   16 @@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;  	)  /* Ethertype field values */ +#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880  #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318  #define PCH_GBE_FRAME_SIZE_2048         2048  #define PCH_GBE_FRAME_SIZE_4096         4096 @@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;  #define PCH_GBE_INT_ENABLE_MASK ( \  	PCH_GBE_INT_RX_DMA_CMPLT |    \  	PCH_GBE_INT_RX_DSC_EMP   |    \ +	PCH_GBE_INT_RX_FIFO_ERR  |    \  	PCH_GBE_INT_WOL_DET      |    \  	PCH_GBE_INT_TX_CMPLT          \  	) +#define PCH_GBE_INT_DISABLE_ALL		0  static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; @@ -138,6 +144,27 @@ static void 
pch_gbe_wait_clr_bit(void *reg, u32 bit)  	if (!tmp)  		pr_err("Error: busy bit is not cleared\n");  } + +/** + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context + * @reg:	Pointer of register + * @busy:	Busy bit + */ +static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) +{ +	u32 tmp; +	int ret = -1; +	/* wait busy */ +	tmp = 20; +	while ((ioread32(reg) & bit) && --tmp) +		udelay(5); +	if (!tmp) +		pr_err("Error: busy bit is not cleared\n"); +	else +		ret = 0; +	return ret; +} +  /**   * pch_gbe_mac_mar_set - Set MAC address register   * @hw:	    Pointer to the HW structure @@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)  	return;  } +static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) +{ +	/* Read the MAC address. and store to the private data */ +	pch_gbe_mac_read_mac_addr(hw); +	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); +	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); +	/* Setup the MAC address */ +	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); +	return; +} +  /**   * pch_gbe_mac_init_rx_addrs - Initialize receive address's   * @hw:	Pointer to the HW structure @@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)  	tcpip = ioread32(&hw->reg->TCPIP_ACC); -	if (netdev->features & NETIF_F_RXCSUM) { -		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; -		tcpip |= PCH_GBE_RX_TCPIPACC_EN; -	} else { -		tcpip |= PCH_GBE_RX_TCPIPACC_OFF; -		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; -	} +	tcpip |= PCH_GBE_RX_TCPIPACC_OFF; +	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;  	iowrite32(tcpip, &hw->reg->TCPIP_ACC);  	return;  } @@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)  	iowrite32(rdba, &hw->reg->RX_DSC_BASE);  	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);  	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); - -	/* Enables Receive DMA */ -	rxdma = ioread32(&hw->reg->DMA_CTRL); -	rxdma |= PCH_GBE_RX_DMA_EN; -	iowrite32(rxdma, &hw->reg->DMA_CTRL); -	/* Enables Receive */ -	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);  }  /** @@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)  	spin_unlock_irqrestore(&adapter->stats_lock, flags);  } +static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) +{ +	struct pch_gbe_hw *hw = &adapter->hw; +	u32 rxdma; +	u16 value; +	int ret; + +	/* Disable Receive DMA */ +	rxdma = ioread32(&hw->reg->DMA_CTRL); +	rxdma &= ~PCH_GBE_RX_DMA_EN; +	iowrite32(rxdma, &hw->reg->DMA_CTRL); +	/* Wait Rx DMA BUS is IDLE */ +	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); +	if (ret) { +		/* Disable Bus master */ +		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); +		value &= ~PCI_COMMAND_MASTER; +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value); +		/* Stop Receive */ +		pch_gbe_mac_reset_rx(hw); +		/* Enable Bus master */ +		value |= PCI_COMMAND_MASTER; +		pci_write_config_word(adapter->pdev, PCI_COMMAND, value); +	} else { +		/* Stop Receive */ +		pch_gbe_mac_reset_rx(hw); +	} +} + +static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +{ +	u32 rxdma; + +	/* Enables Receive DMA */ +	rxdma = ioread32(&hw->reg->DMA_CTRL); +	rxdma |= PCH_GBE_RX_DMA_EN; +	iowrite32(rxdma, &hw->reg->DMA_CTRL); +	/* Enables Receive */ +	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); +	return; +} +  /**   * pch_gbe_intr - Interrupt Handler   * @irq:   Interrupt number @@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)  	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)  		
adapter->stats.intr_rx_frame_err_count++;  	if (int_st & PCH_GBE_INT_RX_FIFO_ERR) -		adapter->stats.intr_rx_fifo_err_count++; +		if (!adapter->rx_stop_flag) { +			adapter->stats.intr_rx_fifo_err_count++; +			pr_debug("Rx fifo over run\n"); +			adapter->rx_stop_flag = true; +			int_en = ioread32(&hw->reg->INT_EN); +			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), +				  &hw->reg->INT_EN); +			pch_gbe_stop_receive(adapter); +		}  	if (int_st & PCH_GBE_INT_RX_DMA_ERR)  		adapter->stats.intr_rx_dma_err_count++;  	if (int_st & PCH_GBE_INT_TX_FIFO_ERR) @@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)  	/* When Rx descriptor is empty  */  	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {  		adapter->stats.intr_rx_dsc_empty_count++; -		pr_err("Rx descriptor is empty\n"); +		pr_debug("Rx descriptor is empty\n");  		int_en = ioread32(&hw->reg->INT_EN);  		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);  		if (hw->mac.tx_fc_enable) { @@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,  	unsigned int i;  	unsigned int bufsz; -	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; +	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;  	i = rx_ring->next_to_use;  	while ((cleaned_count--)) {  		buffer_info = &rx_ring->buffer_info[i]; -		skb = buffer_info->skb; -		if (skb) { -			skb_trim(skb, 0); -		} else { -			skb = netdev_alloc_skb(netdev, bufsz); -			if (unlikely(!skb)) { -				/* Better luck next round */ -				adapter->stats.rx_alloc_buff_failed++; -				break; -			} -			/* 64byte align */ -			skb_reserve(skb, PCH_GBE_DMA_ALIGN); - -			buffer_info->skb = skb; -			buffer_info->length = adapter->rx_buffer_len; +		skb = netdev_alloc_skb(netdev, bufsz); +		if (unlikely(!skb)) { +			/* Better luck next round */ +			adapter->stats.rx_alloc_buff_failed++; +			break;  		} +		/* align */ +		skb_reserve(skb, NET_IP_ALIGN); +		buffer_info->skb = skb; +  		buffer_info->dma = dma_map_single(&pdev->dev, -						  skb->data, +						  buffer_info->rx_buffer,  						  buffer_info->length,  						  DMA_FROM_DEVICE);  		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { @@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,  	return;  } +static int +pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, +			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count) +{ +	struct pci_dev *pdev = adapter->pdev; +	struct pch_gbe_buffer *buffer_info; +	unsigned int i; +	unsigned int bufsz; +	unsigned int size; + +	bufsz = adapter->rx_buffer_len; + +	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; +	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, +						&rx_ring->rx_buff_pool_logic, +						GFP_KERNEL); +	if (!rx_ring->rx_buff_pool) { +		pr_err("Unable to allocate memory for the receive poll buffer\n"); +		return -ENOMEM; +	} +	memset(rx_ring->rx_buff_pool, 0, size); +	rx_ring->rx_buff_pool_size = size; +	for (i = 0; i < rx_ring->count; i++) { +		buffer_info = &rx_ring->buffer_info[i]; +		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; +		buffer_info->length = bufsz; +	} +	return 0; +} +  /**   * pch_gbe_alloc_tx_buffers - Allocate transmit buffers   * @adapter:   Board private structure @@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  	unsigned int i;  	unsigned int cleaned_count = 0;  	bool cleaned = false; -	struct sk_buff *skb, *new_skb; +	struct sk_buff *skb;  	u8 dma_status;  	u16 gbec_status;  	u32 tcp_ip_status; @@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct 
pch_gbe_adapter *adapter,  		rx_desc->gbec_status = DSC_INIT16;  		buffer_info = &rx_ring->buffer_info[i];  		skb = buffer_info->skb; +		buffer_info->skb = NULL;  		/* unmap dma */  		dma_unmap_single(&pdev->dev, buffer_info->dma,  				   buffer_info->length, DMA_FROM_DEVICE);  		buffer_info->mapped = false; -		/* Prefetch the packet */ -		prefetch(skb->data);  		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "  			 "TCP:0x%08x]  BufInf = 0x%p\n", @@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  			pr_err("Receive CRC Error\n");  		} else {  			/* get receive length */ -			/* length convert[-3] */ -			length = (rx_desc->rx_words_eob) - 3; +			/* length convert[-3], length includes FCS length */ +			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; +			if (rx_desc->rx_words_eob & 0x02) +				length = length - 4; +			/* +			 * buffer_info->rx_buffer: [Header:14][payload] +			 * skb->data: [Reserve:2][Header:14][payload] +			 */ +			memcpy(skb->data, buffer_info->rx_buffer, length); -			/* Decide the data conversion method */ -			if (!(netdev->features & NETIF_F_RXCSUM)) { -				/* [Header:14][payload] */ -				if (NET_IP_ALIGN) { -					/* Because alignment differs, -					 * the new_skb is newly allocated, -					 * and data is copied to new_skb.*/ -					new_skb = netdev_alloc_skb(netdev, -							 length + NET_IP_ALIGN); -					if (!new_skb) { -						/* dorrop error */ -						pr_err("New skb allocation " -							"Error\n"); -						goto dorrop; -					} -					skb_reserve(new_skb, NET_IP_ALIGN); -					memcpy(new_skb->data, skb->data, -					       length); -					skb = new_skb; -				} else { -					/* DMA buffer is used as SKB as it is.*/ -					buffer_info->skb = NULL; -				} -			} else { -				/* [Header:14][padding:2][payload] */ -				/* The length includes padding length */ -				length = length - PCH_GBE_DMA_PADDING; -				if ((length < copybreak) || -				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { -					/* Because alignment differs, -					 * the new_skb is newly allocated, -					 * and data is copied to new_skb. 
-					 * Padding data is deleted -					 * at the time of a copy.*/ -					new_skb = netdev_alloc_skb(netdev, -							 length + NET_IP_ALIGN); -					if (!new_skb) { -						/* dorrop error */ -						pr_err("New skb allocation " -							"Error\n"); -						goto dorrop; -					} -					skb_reserve(new_skb, NET_IP_ALIGN); -					memcpy(new_skb->data, skb->data, -					       ETH_HLEN); -					memcpy(&new_skb->data[ETH_HLEN], -					       &skb->data[ETH_HLEN + -					       PCH_GBE_DMA_PADDING], -					       length - ETH_HLEN); -					skb = new_skb; -				} else { -					/* Padding data is deleted -					 * by moving header data.*/ -					memmove(&skb->data[PCH_GBE_DMA_PADDING], -						&skb->data[0], ETH_HLEN); -					skb_reserve(skb, NET_IP_ALIGN); -					buffer_info->skb = NULL; -				} -			} -			/* The length includes FCS length */ -			length = length - ETH_FCS_LEN;  			/* update status of driver */  			adapter->stats.rx_bytes += length;  			adapter->stats.rx_packets++; @@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,  			pr_debug("Receive skb->ip_summed: %d length: %d\n",  				 skb->ip_summed, length);  		} -dorrop:  		/* return some buffers to hardware, one at a time is too slow */  		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {  			pch_gbe_alloc_rx_buffers(adapter, rx_ring, @@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)  		pr_err("Error: can't bring device up\n");  		return err;  	} +	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); +	if (err) { +		pr_err("Error: can't bring device up\n"); +		return err; +	}  	pch_gbe_alloc_tx_buffers(adapter, tx_ring);  	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);  	adapter->tx_queue_len = netdev->tx_queue_len; +	pch_gbe_start_receive(&adapter->hw);  	mod_timer(&adapter->watchdog_timer, jiffies); @@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)  void pch_gbe_down(struct pch_gbe_adapter *adapter)  {  	struct net_device *netdev = adapter->netdev; +	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;  	/* signal that we're down so the interrupt handler does not  	 * reschedule our watchdog timer */ @@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)  	pch_gbe_reset(adapter);  	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);  	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); + +	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, +			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); +	rx_ring->rx_buff_pool_logic = 0; +	rx_ring->rx_buff_pool_size = 0; +	rx_ring->rx_buff_pool = NULL;  }  /** @@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct pch_gbe_adapter *adapter = netdev_priv(netdev);  	int max_frame; +	unsigned long old_rx_buffer_len = adapter->rx_buffer_len; +	int err;  	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;  	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || @@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)  	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)  		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;  	else -		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; -	netdev->mtu = new_mtu; -	adapter->hw.mac.max_frame_size = max_frame; +		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; -	if (netif_running(netdev)) -		pch_gbe_reinit_locked(adapter); -	else +	if (netif_running(netdev)) { +		pch_gbe_down(adapter); +		err = pch_gbe_up(adapter); +		if (err) { +			adapter->rx_buffer_len = old_rx_buffer_len; +		
	pch_gbe_up(adapter); +			return -ENOMEM; +		} else { +			netdev->mtu = new_mtu; +			adapter->hw.mac.max_frame_size = max_frame; +		} +	} else {  		pch_gbe_reset(adapter); +		netdev->mtu = new_mtu; +		adapter->hw.mac.max_frame_size = max_frame; +	}  	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",  		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, @@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)  	int work_done = 0;  	bool poll_end_flag = false;  	bool cleaned = false; +	u32 int_en;  	pr_debug("budget : %d\n", budget); @@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)  	if (!netif_carrier_ok(netdev)) {  		poll_end_flag = true;  	} else { -		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);  		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); +		if (adapter->rx_stop_flag) { +			adapter->rx_stop_flag = false; +			pch_gbe_start_receive(&adapter->hw); +			int_en = ioread32(&adapter->hw.reg->INT_EN); +			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), +					&adapter->hw.reg->INT_EN); +		} +		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);  		if (cleaned)  			work_done = budget; @@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {  	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),  	 .class_mask = (0xFFFF00)  	 }, +	{.vendor = PCI_VENDOR_ID_ROHM, +	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, +	 .subvendor = PCI_ANY_ID, +	 .subdevice = PCI_ANY_ID, +	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), +	 .class_mask = (0xFFFF00) +	 },  	/* required last entry */  	{0}  }; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 10e5d985afa..edfa15d2e79 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)  			continue;  		} -		mtu = pch->chan->mtu - hdrlen; +		/* +		 * hdrlen includes the 2-byte PPP protocol field, but the +		 * MTU counts only the payload excluding the protocol field. 
+		 * (RFC1661 Section 2) +		 */ +		mtu = pch->chan->mtu - (hdrlen - 2);  		if (mtu < 4)  			mtu = 4;  		if (flen > mtu) diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c index 1a3033d8e7e..d17d0624c5e 100644 --- a/drivers/net/pxa168_eth.c +++ b/drivers/net/pxa168_eth.c @@ -40,6 +40,7 @@  #include <linux/clk.h>  #include <linux/phy.h>  #include <linux/io.h> +#include <linux/interrupt.h>  #include <linux/types.h>  #include <asm/pgtable.h>  #include <asm/system.h> diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 02339b3352e..c2366701792 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -407,6 +407,7 @@ enum rtl_register_content {  	RxOK		= 0x0001,  	/* RxStatusDesc */ +	RxBOVF	= (1 << 24),  	RxFOVF	= (1 << 23),  	RxRWT	= (1 << 22),  	RxRES	= (1 << 21), @@ -682,6 +683,7 @@ struct rtl8169_private {  	struct mii_if_info mii;  	struct rtl8169_counters counters;  	u32 saved_wolopts; +	u32 opts1_mask;  	struct rtl_fw {  		const struct firmware *fw; @@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);  MODULE_FIRMWARE(FIRMWARE_8168D_2);  MODULE_FIRMWARE(FIRMWARE_8168E_1);  MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3);  MODULE_FIRMWARE(FIRMWARE_8105E_1);  static int rtl8169_open(struct net_device *dev); @@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,  	netif_err(tp, link, dev, "PHY reset failed\n");  } +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ +	void __iomem *ioaddr = tp->mmio_addr; + +	return (tp->mac_version == RTL_GIGA_MAC_VER_01) && +	    (RTL_R8(PHYstatus) & TBI_Enable); +} +  static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)  {  	void __iomem *ioaddr = tp->mmio_addr; @@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)  			   ADVERTISED_1000baseT_Half |  			   ADVERTISED_1000baseT_Full : 0)); -	if (RTL_R8(PHYstatus) & TBI_Enable) +	if (rtl_tbi_enabled(tp))  		netif_info(tp, link, dev, "TBI auto-negotiating\n");  } @@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)  static void r810x_pll_power_down(struct rtl8169_private *tp)  { +	void __iomem *ioaddr = tp->mmio_addr; +  	if (__rtl8169_get_wol(tp) & WAKE_ANY) {  		rtl_writephy(tp, 0x1f, 0x0000);  		rtl_writephy(tp, MII_BMCR, 0x0000); + +		if (tp->mac_version == RTL_GIGA_MAC_VER_29 || +		    tp->mac_version == RTL_GIGA_MAC_VER_30) +			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | +				AcceptMulticast | AcceptMyPhys);  		return;  	} @@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)  		rtl_writephy(tp, MII_BMCR, 0x0000);  		if (tp->mac_version == RTL_GIGA_MAC_VER_32 || -		    tp->mac_version == RTL_GIGA_MAC_VER_33) +		    tp->mac_version == RTL_GIGA_MAC_VER_33 || +		    tp->mac_version == RTL_GIGA_MAC_VER_34)  			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |  				AcceptMulticast | AcceptMyPhys);  		return; @@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);  	RTL_W8(Cfg9346, Cfg9346_Lock); -	if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && -	    (RTL_R8(PHYstatus) & TBI_Enable)) { +	if (rtl_tbi_enabled(tp)) {  		tp->set_speed = rtl8169_set_speed_tbi;  		tp->get_settings = rtl8169_gset_tbi;  		tp->phy_reset_enable = rtl8169_tbi_reset_enable; @@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	tp->intr_event = cfg->intr_event;  	tp->napi_event 
= cfg->napi_event; +	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? +		~(RxBOVF | RxFOVF) : ~0; +  	init_timer(&tp->timer);  	tp->timer.data = (unsigned long) dev;  	tp->timer.function = rtl8169_phy_timer; @@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)  		while (RTL_R8(TxPoll) & NPQ)  			udelay(20);  	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { +		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);  		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))  			udelay(100);  	} else { @@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,  		u32 status;  		rmb(); -		status = le32_to_cpu(desc->opts1); +		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;  		if (status & DescOwn)  			break; diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index faca764aa21..b59abc706d9 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)  {  	struct pci_dev *pci_dev = efx->pci_dev;  	dma_addr_t dma_mask = efx->type->max_dma_mask; -	bool use_wc;  	int rc;  	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); @@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)  		rc = -EIO;  		goto fail3;  	} - -	/* bug22643: If SR-IOV is enabled then tx push over a write combined -	 * mapping is unsafe. We need to disable write combining in this case. -	 * MSI is unsupported when SR-IOV is enabled, and the firmware will -	 * have removed the MSI capability. So write combining is safe if -	 * there is an MSI capability. -	 */ -	use_wc = (!EFX_WORKAROUND_22643(efx) || -		  pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); -	if (use_wc) -		efx->membase = ioremap_wc(efx->membase_phys, -					  efx->type->mem_map_size); -	else -		efx->membase = ioremap_nocache(efx->membase_phys, -					       efx->type->mem_map_size); +	efx->membase = ioremap_nocache(efx->membase_phys, +				       efx->type->mem_map_size);  	if (!efx->membase) {  		netif_err(efx, probe, efx->net_dev,  			  "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index cc978803d48..751d1ec112c 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h @@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,  	_efx_writed(efx, value->u32[2], reg + 8);  	_efx_writed(efx, value->u32[3], reg + 12);  #endif -	wmb();  	mmiowb();  	spin_unlock_irqrestore(&efx->biu_lock, flags);  } @@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,  	__raw_writel((__force u32)value->u32[0], membase + addr);  	__raw_writel((__force u32)value->u32[1], membase + addr + 4);  #endif -	wmb();  	mmiowb();  	spin_unlock_irqrestore(&efx->biu_lock, flags);  } @@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,  	/* No lock required */  	_efx_writed(efx, value->u32[0], reg); -	wmb();  }  /* Read a 128-bit CSR, locking as appropriate. 
*/ @@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,  	spin_lock_irqsave(&efx->biu_lock, flags);  	value->u32[0] = _efx_readd(efx, reg + 0); -	rmb();  	value->u32[1] = _efx_readd(efx, reg + 4);  	value->u32[2] = _efx_readd(efx, reg + 8);  	value->u32[3] = _efx_readd(efx, reg + 12); @@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,  	value->u64[0] = (__force __le64)__raw_readq(membase + addr);  #else  	value->u32[0] = (__force __le32)__raw_readl(membase + addr); -	rmb();  	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);  #endif  	spin_unlock_irqrestore(&efx->biu_lock, flags); @@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,  	_efx_writed(efx, value->u32[2], reg + 8);  	_efx_writed(efx, value->u32[3], reg + 12);  #endif -	wmb();  }  #define efx_writeo_page(efx, value, reg, page)				\  	_efx_writeo_page(efx, value,					\ diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 3dd45ed61f0..81a42539746 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)  	return &nic_data->mcdi;  } -static inline void -efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) -{ -	struct siena_nic_data *nic_data = efx->nic_data; -	value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); -} - -static inline void -efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) -{ -	struct siena_nic_data *nic_data = efx->nic_data; -	__raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); -} -  void efx_mcdi_init(struct efx_nic *efx)  {  	struct efx_mcdi_iface *mcdi; @@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,  			    const u8 *inbuf, size_t inlen)  {  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx); -	unsigned pdu = MCDI_PDU(efx); -	unsigned doorbell = MCDI_DOORBELL(efx); +	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); +	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);  	unsigned int i;  	efx_dword_t hdr;  	u32 xflags, seqno; @@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,  			     MCDI_HEADER_SEQ, seqno,  			     MCDI_HEADER_XFLAGS, xflags); -	efx_mcdi_writed(efx, &hdr, pdu); +	efx_writed(efx, &hdr, pdu);  	for (i = 0; i < inlen; i += 4) -		efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), -				pdu + 4 + i); +		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); + +	/* Ensure the payload is written out before the header */ +	wmb();  	/* ring the doorbell with a distinctive value */ -	EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); -	efx_mcdi_writed(efx, &hdr, doorbell); +	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);  }  static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)  {  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx); -	unsigned int pdu = MCDI_PDU(efx); +	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);  	int i;  	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);  	BUG_ON(outlen & 3 || outlen >= 0x100);  	for (i = 0; i < outlen; i += 4) -		efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); +		*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);  }  static int efx_mcdi_poll(struct efx_nic *efx) @@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)  	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);  	unsigned int time, finish;  	
unsigned int respseq, respcmd, error; -	unsigned int pdu = MCDI_PDU(efx); +	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);  	unsigned int rc, spins;  	efx_dword_t reg; @@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)  		time = get_seconds(); -		efx_mcdi_readd(efx, &reg, pdu); +		rmb(); +		efx_readd(efx, &reg, pdu);  		/* All 1's indicates that shared memory is in reset (and is  		 * not a valid header). Wait for it to come out reset before @@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)  			  respseq, mcdi->seqno);  		rc = EIO;  	} else if (error) { -		efx_mcdi_readd(efx, &reg, pdu + 4); +		efx_readd(efx, &reg, pdu + 4);  		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {  #define TRANSLATE_ERROR(name)					\  		case MC_CMD_ERR_ ## name:			\ @@ -222,21 +210,21 @@ out:  /* Test and clear MC-rebooted flag for this port/function */  int efx_mcdi_poll_reboot(struct efx_nic *efx)  { -	unsigned int addr = MCDI_REBOOT_FLAG(efx); +	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);  	efx_dword_t reg;  	uint32_t value;  	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)  		return false; -	efx_mcdi_readd(efx, &reg, addr); +	efx_readd(efx, &reg, addr);  	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);  	if (value == 0)  		return 0;  	EFX_ZERO_DWORD(reg); -	efx_mcdi_writed(efx, &reg, addr); +	efx_writed(efx, &reg, addr);  	if (value == MC_STATUS_DWORD_ASSERT)  		return -EINTR; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index bafa23a6874..3edfbaf5f02 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)  		size = min_t(size_t, table->step, 16); -		if (table->offset >= efx->type->mem_map_size) { -			/* No longer mapped; return dummy data */ -			memcpy(buf, "\xde\xc0\xad\xde", 4); -			buf += table->rows * size; -			continue; -		} -  		for (i = 0; i < table->rows; i++) {  			switch (table->step) {  			case 4: /* 32-bit register or SRAM */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 4bd1f2839df..7443f99c977 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)  /**   * struct siena_nic_data - Siena NIC state   * @mcdi: Management-Controller-to-Driver Interface - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.   
* @wol_filter_id: Wake-on-LAN packet filter id   */  struct siena_nic_data {  	struct efx_mcdi_iface mcdi; -	void __iomem *mcdi_smem;  	int wol_filter_id;  }; diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 5735e84c69d..2c3bd93fab5 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)  	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);  	efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; -	/* Initialise MCDI */ -	nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + -					      FR_CZ_MC_TREG_SMEM, -					      FR_CZ_MC_TREG_SMEM_STEP * -					      FR_CZ_MC_TREG_SMEM_ROWS); -	if (!nic_data->mcdi_smem) { -		netif_err(efx, probe, efx->net_dev, -			  "could not map MCDI at %llx+%x\n", -			  (unsigned long long)efx->membase_phys + -			  FR_CZ_MC_TREG_SMEM, -			  FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); -		rc = -ENOMEM; -		goto fail1; -	}  	efx_mcdi_init(efx);  	/* Recover from a failed assertion before probing */  	rc = efx_mcdi_handle_assertion(efx);  	if (rc) -		goto fail2; +		goto fail1;  	/* Let the BMC know that the driver is now in charge of link and  	 * filter settings. We must do this before we reset the NIC */ @@ -324,7 +310,6 @@ fail4:  fail3:  	efx_mcdi_drv_attach(efx, false, NULL);  fail2: -	iounmap(nic_data->mcdi_smem);  fail1:  	kfree(efx->nic_data);  	return rc; @@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)  static void siena_remove_nic(struct efx_nic *efx)  { -	struct siena_nic_data *nic_data = efx->nic_data; -  	efx_nic_free_buffer(efx, &efx->irq_status);  	siena_reset_hw(efx, RESET_TYPE_ALL); @@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)  		efx_mcdi_drv_attach(efx, false, NULL);  	/* Tear down the private nic state */ -	iounmap(nic_data->mcdi_smem); -	kfree(nic_data); +	kfree(efx->nic_data);  	efx->nic_data = NULL;  } @@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {  	.default_mac_ops = &efx_mcdi_mac_operations,  	.revision = EFX_REV_SIENA_A0, -	.mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ +	.mem_map_size = (FR_CZ_MC_TREG_SMEM + +			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),  	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,  	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,  	.buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 99ff11400ce..e4dd3a7f304 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -38,8 +38,6 @@  #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS  /* Legacy interrupt storm when interrupt fifo fills */  #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA -/* Write combining and sriov=enabled are incompatible */ -#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA  /* Spurious parity errors in TSORT buffers */  #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index dc3fbf61910..4a1374df608 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  		}  	} -#ifdef BCM_KERNEL_SUPPORTS_8021Q  	if (vlan_tx_tag_present(skb)) {  		base_flags |= TXD_FLAG_VLAN;  		vlan = vlan_tx_tag_get(skb);  	} -#endif  	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&  	    !mss && skb->len > VLAN_ETH_FRAME_LEN) diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 15772b1b6a9..13c1f044b40 --- 
a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -59,6 +59,7 @@  #define USB_PRODUCT_IPHONE_3G   0x1292  #define USB_PRODUCT_IPHONE_3GS  0x1294  #define USB_PRODUCT_IPHONE_4	0x1297 +#define USB_PRODUCT_IPHONE_4_VZW 0x129c  #define IPHETH_USBINTF_CLASS    255  #define IPHETH_USBINTF_SUBCLASS 253 @@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {  		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,  		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,  		IPHETH_USBINTF_PROTO) }, +	{ USB_DEVICE_AND_INTERFACE_INFO( +		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, +		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, +		IPHETH_USBINTF_PROTO) },  	{ }  };  MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295..2d394af8217 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,  	case ADC_DC_CAL:  		/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */  		if (!IS_CHAN_B(chan) && -		    !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) +		    !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && +		      IS_CHAN_HT20(chan)))  			supported = true;  		break;  	} diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1baca8e4715..fcafec0605f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,  		REG_WRITE_ARRAY(&ah->iniModesAdditional,  				modesIndex, regWrites); -	if (AR_SREV_9300(ah)) +	if (AR_SREV_9330(ah))  		REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);  	if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6530694a59a..722967b86cf 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)  	mutex_lock(&sc->mutex);  	cancel_delayed_work_sync(&sc->tx_complete_work); +	if (ah->ah_flags & AH_UNPLUGGED) { +		ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); +		mutex_unlock(&sc->mutex); +		return; +	} +  	if (sc->sc_flags & SC_OP_INVALID) {  		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");  		mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 26f1ab840cc..e293a7921bf 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)  	u32 cmd, beacon0_valid, beacon1_valid;  	if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && -	    !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) +	    !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) && +	    !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))  		return;  	/* This is the bottom half of the asynchronous beacon update. 
*/ diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 3774dd03474..ef9ad79d1bf 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)  static int ipw2100_net_init(struct net_device *dev)  {  	struct ipw2100_priv *priv = libipw_priv(dev); + +	return ipw2100_up(priv, 1); +} + +static int ipw2100_wdev_init(struct net_device *dev) +{ +	struct ipw2100_priv *priv = libipw_priv(dev);  	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);  	struct wireless_dev *wdev = &priv->ieee->wdev; -	int ret;  	int i; -	ret = ipw2100_up(priv, 1); -	if (ret) -		return ret; -  	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);  	/* fill-out priv->ieee->bg_band */ @@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,  		       "Error calling register_netdev.\n");  		goto fail;  	} +	registered = 1; + +	err = ipw2100_wdev_init(dev); +	if (err) +		goto fail;  	mutex_lock(&priv->action_mutex); -	registered = 1;  	IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); @@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,        fail_unlock:  	mutex_unlock(&priv->action_mutex); - +	wiphy_unregister(priv->ieee->wdev.wiphy); +	kfree(priv->ieee->bg_band.channels);        fail:  	if (dev) {  		if (registered) diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 87813c33bdc..4ffebede5e0 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work)  /* Called by register_netdev() */  static int ipw_net_init(struct net_device *dev)  { +	int rc = 0; +	struct ipw_priv *priv = libipw_priv(dev); + +	mutex_lock(&priv->mutex); +	if (ipw_up(priv)) +		rc = -EIO; +	mutex_unlock(&priv->mutex); + +	return rc; +} + +static int ipw_wdev_init(struct net_device *dev) +{  	int i, rc = 0;  	struct ipw_priv *priv = libipw_priv(dev);  	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);  	struct wireless_dev *wdev = &priv->ieee->wdev; -	mutex_lock(&priv->mutex); - -	if (ipw_up(priv)) { -		rc = -EIO; -		goto out; -	}  	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); @@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)  	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);  	/* With that information in place, we can now register the wiphy... 
*/ -	if (wiphy_register(wdev->wiphy)) { +	if (wiphy_register(wdev->wiphy))  		rc = -EIO; -		goto out; -	} -  out: -	mutex_unlock(&priv->mutex);  	return rc;  } @@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,  		goto out_remove_sysfs;  	} +	err = ipw_wdev_init(net_dev); +	if (err) { +		IPW_ERROR("failed to register wireless device\n"); +		goto out_unregister_netdev; +	} +  #ifdef CONFIG_IPW2200_PROMISCUOUS  	if (rtap_iface) {  	        err = ipw_prom_alloc(priv);  		if (err) {  			IPW_ERROR("Failed to register promiscuous network "  				  "device (error %d).\n", err); -			unregister_netdev(priv->net_dev); -			goto out_remove_sysfs; +			wiphy_unregister(priv->ieee->wdev.wiphy); +			kfree(priv->ieee->a_band.channels); +			kfree(priv->ieee->bg_band.channels); +			goto out_unregister_netdev;  		}  	}  #endif @@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,  	return 0; +      out_unregister_netdev: +	unregister_netdev(priv->net_dev);        out_remove_sysfs:  	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);        out_release_irq: diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 977bd2477c6..164bcae821f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,   out: -	rs_sta->last_txrate_idx = index; -	if (sband->band == IEEE80211_BAND_5GHZ) -		info->control.rates[0].idx = rs_sta->last_txrate_idx - -				IWL_FIRST_OFDM_RATE; -	else +	if (sband->band == IEEE80211_BAND_5GHZ) { +		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) +			index = IWL_FIRST_OFDM_RATE; +		rs_sta->last_txrate_idx = index; +		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; +	} else { +		rs_sta->last_txrate_idx = index;  		info->control.rates[0].idx = rs_sta->last_txrate_idx; +	}  	IWL_DEBUG_RATE(priv, "leave: %d\n", index);  } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index a895a099d08..56211006a18 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)  	memset(&cmd, 0, sizeof(cmd));  	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); -	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); +	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));  	if (!(cmd.radio_sensor_offset))  		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index b0ae4de7f08..f9c3cd95d61 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,  		    IEEE80211_HW_SPECTRUM_MGMT |  		    IEEE80211_HW_REPORTS_TX_ACK_STATUS; +	/* +	 * Including the following line will crash some AP's.  This +	 * workaround removes the stimulus which causes the crash until +	 * the AP software can be fixed.  	
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; +	 */  	hw->flags |= IEEE80211_HW_SUPPORTS_PS |  		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index a6b2b1db0b1..222d410c586 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c @@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)  	cmd = txq->cmd[cmd_index];  	meta = &txq->meta[cmd_index]; +	txq->time_stamp = jiffies; +  	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);  	/* Input error checking is done when commands are added to queue. */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index ef67f6786a8..0019dfd8fb0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)  	rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®);  	/* Apparently the data is read from end to start */ -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, -					(u32 *)&rt2x00dev->eeprom[i]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, -					(u32 *)&rt2x00dev->eeprom[i + 2]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, -					(u32 *)&rt2x00dev->eeprom[i + 4]); -	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, -					(u32 *)&rt2x00dev->eeprom[i + 6]); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®); +	/* The returned value is in CPU order, but eeprom is le */ +	rt2x00dev->eeprom[i] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®); +	*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®); +	*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); +	rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, ®); +	*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);  	mutex_unlock(&rt2x00dev->csr_mutex);  } @@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)  		return -ENODEV;  	} -	if (!rt2x00_rf(rt2x00dev, RF2820) && -	    !rt2x00_rf(rt2x00dev, RF2850) && -	    !rt2x00_rf(rt2x00dev, RF2720) && -	    !rt2x00_rf(rt2x00dev, RF2750) && -	    !rt2x00_rf(rt2x00dev, RF3020) && -	    !rt2x00_rf(rt2x00dev, RF2020) && -	    !rt2x00_rf(rt2x00dev, RF3021) && -	    !rt2x00_rf(rt2x00dev, RF3022) && -	    !rt2x00_rf(rt2x00dev, RF3052) && -	    !rt2x00_rf(rt2x00dev, RF3320) && -	    !rt2x00_rf(rt2x00dev, RF5370) && -	    !rt2x00_rf(rt2x00dev, RF5390)) { -		ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); +	switch (rt2x00dev->chip.rf) { +	case RF2820: +	case RF2850: +	case RF2720: +	case RF2750: +	case RF3020: +	case RF2020: +	case RF3021: +	case RF3022: +	case RF3052: +	case RF3320: +	case RF5370: +	case RF5390: +		break; +	default: +		ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n", +		      rt2x00dev->chip.rf);  		return -ENODEV;  	} diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 1bdc1aa305c..04c4e9eb6ee 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,  			mac->link_state = MAC80211_NOLINK;  			memset(mac->bssid, 0, 6); + +			/* reset sec info */ +			rtl_cam_reset_sec_info(hw); + +			rtl_cam_reset_all_entry(hw);  			mac->vendor = PEER_UNKNOWN;  
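The rt2800 efuse hunk above hinges on the detail called out in its new comment: rt2800_register_read_lock() returns the EFUSE word in CPU byte order, while the eeprom[] shadow the driver later parses is stored little-endian, so each word has to pass through cpu_to_le32() before being written into the array. The following is a minimal, illustrative userspace sketch of that conversion only; put_le32() and the fake register values are assumptions for the example, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Store a CPU-order 32-bit word into a byte buffer as little-endian;
 * this is the conversion cpu_to_le32() performs in the hunk above. */
static void put_le32(uint8_t *dst, uint32_t cpu_val)
{
	dst[0] = (uint8_t)(cpu_val & 0xff);
	dst[1] = (uint8_t)((cpu_val >> 8) & 0xff);
	dst[2] = (uint8_t)((cpu_val >> 16) & 0xff);
	dst[3] = (uint8_t)((cpu_val >> 24) & 0xff);
}

int main(void)
{
	/* Pretend these four words came back from the EFUSE_DATA3..0
	 * reads, already converted to CPU order by the accessor. */
	uint32_t regs[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint8_t eeprom[16];	/* little-endian shadow, as the driver expects */
	int i;

	for (i = 0; i < 4; i++)
		put_le32(&eeprom[i * 4], regs[i]);

	/* The byte layout is now identical on little- and big-endian hosts:
	 * 44 33 22 11 88 77 66 55 ... */
	for (i = 0; i < (int)sizeof(eeprom); i++)
		printf("%02x%c", (unsigned)eeprom[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}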
			RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, @@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  		 *or clear all entry here.  		 */  		rtl_cam_delete_one_entry(hw, mac_addr, key_idx); + +		rtl_cam_reset_sec_info(hw); +  		break;  	default:  		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 906e7aa55bc..3e52a549622 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,  			       (tcb_desc->rts_use_shortpreamble ? 1 : 0)  			       : (tcb_desc->rts_use_shortgi ? 1 : 0)));  	if (mac->bw_40) { -		if (tcb_desc->packet_bw) { +		if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {  			SET_TX_DESC_DATA_BW(txdesc, 1);  			SET_TX_DESC_DATA_SC(txdesc, 3); +		} else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ +			SET_TX_DESC_DATA_BW(txdesc, 1); +			SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);  		} else {  			SET_TX_DESC_DATA_BW(txdesc, 0); -				if (rate_flag & IEEE80211_TX_RC_DUP_DATA) -					SET_TX_DESC_DATA_SC(txdesc, -							  mac->cur_40_prime_sc); -			} +			SET_TX_DESC_DATA_SC(txdesc, 0); +		}  	} else {  		SET_TX_DESC_DATA_BW(txdesc, 0);  		SET_TX_DESC_DATA_SC(txdesc, 0); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b1187ff31d8..f3f94a5c068 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)  	 * will occur as normal.  	 */  	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || -	    dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) +	     (dev->bus->self && +	      dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))  		*smpss = 0;  	if (*smpss > dev->pcie_mpss) diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c016327..d93a9608b1f 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -35,6 +35,7 @@  #include <linux/module.h>  #include <linux/platform_device.h>  #include <linux/rtc.h> +#include <linux/sched.h>  #include <linux/workqueue.h>  /* DryIce Register Definitions */ diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4e7c04e773e..7639ab906f0 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;  static DEFINE_SPINLOCK(s3c_rtc_pie_lock); +static void s3c_rtc_alarm_clk_enable(bool enable) +{ +	static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); +	static bool alarm_clk_enabled; +	unsigned long irq_flags; + +	spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); +	if (enable) { +		if (!alarm_clk_enabled) { +			clk_enable(rtc_clk); +			alarm_clk_enabled = true; +		} +	} else { +		if (alarm_clk_enabled) { +			clk_disable(rtc_clk); +			alarm_clk_enabled = false; +		} +	} +	spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); +} +  /* IRQ Handlers */  static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) @@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)  		writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);  	clk_disable(rtc_clk); + +	s3c_rtc_alarm_clk_enable(false); +  	return IRQ_HANDLED;  } @@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)  	writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);  	clk_disable(rtc_clk); +	s3c_rtc_alarm_clk_enable(enabled); +  	return 0;  } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c 
b/drivers/scsi/bnx2i/bnx2i_hwi.c index 9ae80cd5953..dba72a4e6a1 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,  	nopout_wqe->itt = ((u16)task->itt |  			   (ISCSI_TASK_TYPE_MPATH <<  			    ISCSI_TMF_REQUEST_TYPE_SHIFT)); -	nopout_wqe->ttt = nopout_hdr->ttt; +	nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);  	nopout_wqe->flags = 0;  	if (!unsol)  		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ba710e350ac..5d0e9a24ae9 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)  	u8 flogi_maddr[ETH_ALEN];  	const struct net_device_ops *ops; +	rtnl_lock(); +  	/*  	 * Don't listen for Ethernet packets anymore.  	 * synchronize_net() ensures that the packet handlers are not running @@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)  					" specific feature for LLD.\n");  	} +	rtnl_unlock(); +  	/* Release the self-reference taken during fcoe_interface_create() */  	fcoe_interface_put(fcoe);  } @@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)  	fcoe_if_destroy(port->lport);  	/* Do not tear down the fcoe interface for NPIV port */ -	if (!npiv) { -		rtnl_lock(); +	if (!npiv)  		fcoe_interface_cleanup(fcoe); -		rtnl_unlock(); -	}  	mutex_unlock(&fcoe_config_mutex);  } @@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)  		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",  		       netdev->name);  		rc = -EIO; +		rtnl_unlock();  		fcoe_interface_cleanup(fcoe); -		goto out_nodev; +		goto out_nortnl;  	}  	/* Make this the "master" N_Port */ @@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)  out_nodev:  	rtnl_unlock(); +out_nortnl:  	mutex_unlock(&fcoe_config_mutex);  	return rc;  } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ec61bdb833a..b200b736b00 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,  	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);  	removed[*nremoved] = h->dev[entry];  	(*nremoved)++; + +	/* +	 * New physical devices won't have target/lun assigned yet +	 * so we need to preserve the values in the slot we are replacing. 
+	 */ +	if (new_entry->target == -1) { +		new_entry->target = h->dev[entry]->target; +		new_entry->lun = h->dev[entry]->lun; +	} +  	h->dev[entry] = new_entry;  	added[*nadded] = new_entry;  	(*nadded)++; @@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,  }  static int hpsa_update_device_info(struct ctlr_info *h, -	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) +	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, +	unsigned char *is_OBDR_device)  { -#define OBDR_TAPE_INQ_SIZE 49 + +#define OBDR_SIG_OFFSET 43 +#define OBDR_TAPE_SIG "$DR-10" +#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) +#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) +  	unsigned char *inq_buff; +	unsigned char *obdr_sig;  	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);  	if (!inq_buff) @@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,  	else  		this_device->raid_level = RAID_UNKNOWN; +	if (is_OBDR_device) { +		/* See if this is a One-Button-Disaster-Recovery device +		 * by looking for "$DR-10" at offset 43 in inquiry data. +		 */ +		obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; +		*is_OBDR_device = (this_device->devtype == TYPE_ROM && +					strncmp(obdr_sig, OBDR_TAPE_SIG, +						OBDR_SIG_LEN) == 0); +	} +  	kfree(inq_buff);  	return 0; @@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,  		return 0;  	} -	if (hpsa_update_device_info(h, scsi3addr, this_device)) +	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))  		return 0;  	(*nmsa2xxx_enclosures)++;  	hpsa_set_bus_target_lun(this_device, bus, target, 0); @@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	 */  	struct ReportLUNdata *physdev_list = NULL;  	struct ReportLUNdata *logdev_list = NULL; -	unsigned char *inq_buff = NULL;  	u32 nphysicals = 0;  	u32 nlogicals = 0;  	u32 ndev_allocated = 0; @@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		GFP_KERNEL);  	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);  	logdev_list = kzalloc(reportlunsize, GFP_KERNEL); -	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);  	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); -	if (!currentsd || !physdev_list || !logdev_list || -		!inq_buff || !tmpdevice) { +	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {  		dev_err(&h->pdev->dev, "out of memory\n");  		goto out;  	} @@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	/* adjust our table of devices */  	nmsa2xxx_enclosures = 0;  	for (i = 0; i < nphysicals + nlogicals + 1; i++) { -		u8 *lunaddrbytes; +		u8 *lunaddrbytes, is_OBDR = 0;  		/* Figure out where the LUN ID info is coming from */  		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, @@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  			continue;  		/* Get device type, vendor, model, device id */ -		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) +		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, +							&is_OBDR))  			continue; /* skip it if we can't talk to it. 
*/  		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,  			tmpdevice); @@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		hpsa_set_bus_target_lun(this_device, bus, target, lun);  		switch (this_device->devtype) { -		case TYPE_ROM: { +		case TYPE_ROM:  			/* We don't *really* support actual CD-ROM devices,  			 * just "One Button Disaster Recovery" tape drive  			 * which temporarily pretends to be a CD-ROM drive. @@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  			 * device by checking for "$DR-10" in bytes 43-48 of  			 * the inquiry data.  			 */ -				char obdr_sig[7]; -#define OBDR_TAPE_SIG "$DR-10" -				strncpy(obdr_sig, &inq_buff[43], 6); -				obdr_sig[6] = '\0'; -				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) -					/* Not OBDR device, ignore it. */ -					break; -			} -			ncurrent++; +			if (is_OBDR) +				ncurrent++;  			break;  		case TYPE_DISK:  			if (i < nphysicals) @@ -1947,7 +1965,6 @@ out:  	for (i = 0; i < ndev_allocated; i++)  		kfree(currentsd[i]);  	kfree(currentsd); -	kfree(inq_buff);  	kfree(physdev_list);  	kfree(logdev_list);  } diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 26072f1e985..6981b773a88 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)  			break;  		case SCU_COMPLETION_TYPE_EVENT: +			sci_controller_event_completion(ihost, ent); +			break; +  		case SCU_COMPLETION_TYPE_NOTIFY: {  			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<  				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); @@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)  	struct isci_request *request;  	struct isci_request *next_request;  	struct sas_task     *task; +	u16 active;  	INIT_LIST_HEAD(&completed_request_list);  	INIT_LIST_HEAD(&errored_request_list); @@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)  		}  	} +	/* the coalesence timeout doubles at each encoding step, so +	 * update it based on the ilog2 value of the outstanding requests +	 */ +	active = isci_tci_active(ihost); +	writel(SMU_ICC_GEN_VAL(NUMBER, active) | +	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), +	       &ihost->smu_registers->interrupt_coalesce_control);  }  /** @@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)  	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);  	/* set the default interrupt coalescence number and timeout value. 
*/ -	sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); +	sci_controller_set_interrupt_coalescence(ihost, 0, 0);  }  static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 062101a39f7..9f33831a2f0 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)  #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))  #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) +/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ +#define ISCI_COALESCE_BASE 9 +  /* expander attached sata devices require 3 rnc slots */  static inline int sci_remote_device_node_count(struct isci_remote_device *idev)  { diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 61e0d09e2b5..29aa34efb0f 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -59,10 +59,19 @@  #include <linux/firmware.h>  #include <linux/efi.h>  #include <asm/string.h> +#include <scsi/scsi_host.h>  #include "isci.h"  #include "task.h"  #include "probe_roms.h" +#define MAJ 1 +#define MIN 0 +#define BUILD 0 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +	__stringify(BUILD) + +MODULE_VERSION(DRV_VERSION); +  static struct scsi_transport_template *isci_transport_template;  static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { @@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;  module_param(max_concurr_spinup, byte, 0);  MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); +	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); +	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + +	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + +struct device_attribute *isci_host_attrs[] = { +	&dev_attr_isci_id, +	NULL +}; +  static struct scsi_host_template isci_sht = {  	.module				= THIS_MODULE, @@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {  	.slave_alloc			= sas_slave_alloc,  	.target_destroy			= sas_target_destroy,  	.ioctl				= sas_ioctl, +	.shost_attrs			= isci_host_attrs,  };  static struct sas_domain_function_template isci_transport_ops  = { @@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)  	return 0;  } -static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) -{ -	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); -	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); -	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); - -	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); -} - -static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); -  static void isci_unregister(struct isci_host *isci_host)  {  	struct Scsi_Host *shost; @@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)  		return;  	shost = isci_host->shost; -	device_remove_file(&shost->shost_dev, &dev_attr_isci_id);  	sas_unregister_ha(&isci_host->sas_ha); @@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)  	if (err)  		goto err_shost_remove; -	err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); -	if (err) -		goto 
err_unregister_ha; -  	return isci_host; - err_unregister_ha: -	sas_unregister_ha(&(isci_host->sas_ha));   err_shost_remove:  	scsi_remove_host(shost);   err_shost: @@ -540,7 +548,8 @@ static __init int isci_init(void)  {  	int err; -	pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); +	pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", +		DRV_NAME, DRV_VERSION);  	isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);  	if (!isci_transport_template) diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 79313a7a235..430fc8ff014 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,  	u32 parity_count = 0;  	u32 llctl, link_rate;  	u32 clksm_value = 0; +	u32 sp_timeouts = 0;  	iphy->link_layer_registers = reg; @@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,  	llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);  	writel(llctl, &iphy->link_layer_registers->link_layer_control); +	sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); + +	/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ +	sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); + +	/* Set RATE_CHANGE timeout value to 0x3B (59us).  This ensures SCU can +	 * lock with 3Gb drive when SCU max rate is set to 1.5Gb. +	 */ +	sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); + +	writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); +  	if (is_a2(ihost->pdev)) {  		/* Program the max ARB time for the PHY to 700us so we inter-operate with  		 * the PMC expander which shuts down PHYs if the expander PHY generates too diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 9b266c7428e..00afc738bbe 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {  #define SCU_AFE_XCVRCR_OFFSET       0x00DC  #define SCU_AFE_LUTCR_OFFSET        0x00E0 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT          (0UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK           (0x000000FFUL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT                 (8UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK                  (0x0000FF00UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT         (16UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK          (0x00FF0000UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT              (24UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK               (0xFF000000UL) + +#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ +	SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) +  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)  #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index a46e07ac789..b5d3a8c4d32 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)  		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);  		return SCI_SUCCESS;  	case SCI_REQ_TASK_WAIT_TC_RESP: +		/* The task frame was already confirmed to have been +		 * sent by the SCU HW.  
Since the state machine is +		 * now only waiting for the task response itself, +		 * abort the request and complete it immediately +		 * and don't wait for the task response. +		 */  		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);  		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);  		return SCI_SUCCESS;  	case SCI_REQ_ABORTING: -		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); -		return SCI_SUCCESS; +		/* If a request has a termination requested twice, return +		 * a failure indication, since HW confirmation of the first +		 * abort is still outstanding. +		 */  	case SCI_REQ_COMPLETED:  	default:  		dev_warn(&ireq->owning_controller->pdev->dev, @@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(  	}  } -static void isci_request_process_stp_response(struct sas_task *task, -					      void *response_buffer) +static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)  { -	struct dev_to_host_fis *d2h_reg_fis = response_buffer;  	struct task_status_struct *ts = &task->task_status;  	struct ata_task_resp *resp = (void *)&ts->buf[0]; -	resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); -	memcpy(&resp->ending_fis[0], response_buffer + 16, 24); +	resp->frame_len = sizeof(*fis); +	memcpy(resp->ending_fis, fis, sizeof(*fis));  	ts->buf_valid_size = sizeof(*resp); -	/** -	 * If the device fault bit is set in the status register, then +	/* If the device fault bit is set in the status register, then  	 * set the sense data and return.  	 */ -	if (d2h_reg_fis->status & ATA_DF) +	if (fis->status & ATA_DF)  		ts->stat = SAS_PROTO_RESPONSE;  	else  		ts->stat = SAM_STAT_GOOD; @@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,  {  	struct sas_task *task = isci_request_access_task(request);  	struct ssp_response_iu *resp_iu; -	void *resp_buf;  	unsigned long task_flags;  	struct isci_remote_device *idev = isci_lookup_device(task->dev);  	enum service_response response       = SAS_TASK_UNDELIVERED; @@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,  				task);  			if (sas_protocol_ata(task->task_proto)) { -				resp_buf = &request->stp.rsp; -				isci_request_process_stp_response(task, -								  resp_buf); +				isci_process_stp_response(task, &request->stp.rsp);  			} else if (SAS_PROTOCOL_SSP == task->task_proto) {  				/* crack the iu response buffer. */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index e9e1e2abacb..16f88ab939c 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)  	 */  	buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;  	header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); -	size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); +	size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);  	/*  	 * The Unsolicited Frame buffers are set at the start of the UF diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 31cb9506f52..75d896686f5 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -214,7 +214,7 @@ struct sci_uf_address_table_array {  	 * starting address of the UF address table.  	 
* 64-bit pointers are required by the hardware.  	 */ -	dma_addr_t *array; +	u64 *array;  	/**  	 * This field specifies the physical address location for the UF diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 01ff082dc34..d261e982a2f 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,  	 */  	error = lport->tt.frame_send(lport, fp); +	if (fh->fh_type == FC_TYPE_BLS) +		return error; +  	/*  	 * Update the exchange and sequence flags,  	 * assuming all frames for the sequence have been sent. @@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,  }  /** - * fc_seq_exch_abort() - Abort an exchange and sequence - * @req_sp:	The sequence to be aborted + * fc_exch_abort_locked() - Abort an exchange + * @ep:	The exchange to be aborted   * @timer_msec: The period of time to wait before aborting   * - * Generally called because of a timeout or an abort from the upper layer. + * Locking notes:  Called with exch lock held + * + * Return value: 0 on success else error code   */ -static int fc_seq_exch_abort(const struct fc_seq *req_sp, -			     unsigned int timer_msec) +static int fc_exch_abort_locked(struct fc_exch *ep, +				unsigned int timer_msec)  {  	struct fc_seq *sp; -	struct fc_exch *ep;  	struct fc_frame *fp;  	int error; -	ep = fc_seq_exch(req_sp); - -	spin_lock_bh(&ep->ex_lock);  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || -	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { -		spin_unlock_bh(&ep->ex_lock); +	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))  		return -ENXIO; -	}  	/*  	 * Send the abort on a new sequence if possible.  	 */  	sp = fc_seq_start_next_locked(&ep->seq); -	if (!sp) { -		spin_unlock_bh(&ep->ex_lock); +	if (!sp)  		return -ENOMEM; -	}  	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;  	if (timer_msec)  		fc_exch_timer_set_locked(ep, timer_msec); -	spin_unlock_bh(&ep->ex_lock);  	/*  	 * If not logged into the fabric, don't send ABTS but leave @@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,  }  /** + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp:	The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. 
+ * + * Return value: 0 on success else error code + */ +static int fc_seq_exch_abort(const struct fc_seq *req_sp, +			     unsigned int timer_msec) +{ +	struct fc_exch *ep; +	int error; + +	ep = fc_seq_exch(req_sp); +	spin_lock_bh(&ep->ex_lock); +	error = fc_exch_abort_locked(ep, timer_msec); +	spin_unlock_bh(&ep->ex_lock); +	return error; +} + +/**   * fc_exch_timeout() - Handle exchange timer expiration   * @work: The work_struct identifying the exchange that timed out   */ @@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)  	int rc = 1;  	spin_lock_bh(&ep->ex_lock); +	fc_exch_abort_locked(ep, 0);  	ep->state |= FC_EX_RST_CLEANUP;  	if (cancel_delayed_work(&ep->timeout_work))  		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */ @@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	struct fc_exch *ep;  	struct fc_seq *sp = NULL;  	struct fc_frame_header *fh; +	struct fc_fcp_pkt *fsp = NULL;  	int rc = 1;  	ep = fc_exch_alloc(lport, fp); @@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	fc_exch_setup_hdr(ep, fp, ep->f_ctl);  	sp->cnt++; -	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) +	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { +		fsp = fr_fsp(fp);  		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); +	}  	if (unlikely(lport->tt.frame_send(lport, fp)))  		goto err; @@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,  	spin_unlock_bh(&ep->ex_lock);  	return sp;  err: -	fc_fcp_ddp_done(fr_fsp(fp)); +	if (fsp) +		fc_fcp_ddp_done(fsp);  	rc = fc_exch_done_locked(ep);  	spin_unlock_bh(&ep->ex_lock);  	if (!rc) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index afb63c84314..4c41ee816f0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)  	struct fc_fcp_internal *si;  	int rc = FAILED;  	unsigned long flags; +	int rval; + +	rval = fc_block_scsi_eh(sc_cmd); +	if (rval) +		return rval;  	lport = shost_priv(sc_cmd->device->host);  	if (lport->state != LPORT_ST_READY) @@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)  	int rc = FAILED;  	int rval; -	rval = fc_remote_port_chkready(rport); +	rval = fc_block_scsi_eh(sc_cmd);  	if (rval) -		goto out; +		return rval;  	lport = shost_priv(sc_cmd->device->host); @@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)  	FC_SCSI_DBG(lport, "Resetting host\n"); +	fc_block_scsi_eh(sc_cmd); +  	lport->tt.lport_reset(lport);  	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;  	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e55ed9cf23f..628f347404f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -88,6 +88,7 @@   */  #include <linux/timer.h> +#include <linux/delay.h>  #include <linux/slab.h>  #include <asm/unaligned.h> @@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)  			   FCH_EVT_LIPRESET, 0);  	fc_vports_linkchange(lport);  	fc_lport_reset_locked(lport); -	if (lport->link_up) +	if (lport->link_up) { +		/* +		 * Wait upto resource allocation time out before +		 * doing re-login since incomplete FIP exchanged +		 * from last session may collide with exchanges +		 * in new session. 
+		 */ +		msleep(lport->r_a_tov);  		fc_lport_enter_flogi(lport); +	}  }  /** diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7836eb01c7f..a31e05f3bfd 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)  			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);  	} -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {  		if (ha->fw_attributes & BIT_4) { +			int prot = 0;  			vha->flags.difdix_supported = 1;  			ql_dbg(ql_dbg_user, vha, 0x7082,  			    "Registered for DIF/DIX type 1 and 3 protection.\n"); +			if (ql2xenabledif == 1) +				prot = SHOST_DIX_TYPE0_PROTECTION;  			scsi_host_set_prot(vha->host, -			    SHOST_DIF_TYPE1_PROTECTION +			    prot | SHOST_DIF_TYPE1_PROTECTION  			    | SHOST_DIF_TYPE2_PROTECTION  			    | SHOST_DIF_TYPE3_PROTECTION  			    | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2155071f310..d79cd8a5f83 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -8,24 +8,24 @@  /*   * Table for showing the current message id in use for particular level   * Change this table for addition of log/debug messages. - * ----------------------------------------------------- - * |             Level            |   Last Value Used  | - * ----------------------------------------------------- - * | Module Init and Probe        |       0x0116       | - * | Mailbox commands             |       0x111e       | - * | Device Discovery             |       0x2083       | - * | Queue Command and IO tracing |       0x302e       | - * | DPC Thread                   |       0x401c       | - * | Async Events                 |       0x5059       | - * | Timer Routines               |       0x600d       | - * | User Space Interactions      |       0x709c       | - * | Task Management              |       0x8043       | - * | AER/EEH                      |       0x900f       | - * | Virtual Port                 |       0xa007       | - * | ISP82XX Specific             |       0xb027       | - * | MultiQ                       |       0xc00b       | - * | Misc                         |       0xd00b       | - * ----------------------------------------------------- + * ---------------------------------------------------------------------- + * |             Level            |   Last Value Used  |     Holes	| + * ---------------------------------------------------------------------- + * | Module Init and Probe        |       0x0116       |  		| + * | Mailbox commands             |       0x1126       |		| + * | Device Discovery             |       0x2083       |		| + * | Queue Command and IO tracing |       0x302e       |     0x3008     | + * | DPC Thread                   |       0x401c       |		| + * | Async Events                 |       0x5059       |		| + * | Timer Routines               |       0x600d       |		| + * | User Space Interactions      |       0x709d       |		| + * | Task Management              |       0x8041       |    		| + * | AER/EEH                      |       0x900f       |		| + * | Virtual Port                 |       0xa007       |		| + * | ISP82XX Specific             |       0xb04f       |    		| + * | MultiQ                       |       0xc00b       |		| + * | Misc                         |       0xd00b       |		| + * 
----------------------------------------------------------------------   */  #include "qla_def.h" diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index cc5a79259d3..a03eaf40f37 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2529,6 +2529,7 @@ struct qla_hw_data {  #define DT_ISP8021			BIT_14  #define DT_ISP_LAST			(DT_ISP8021 << 1) +#define DT_T10_PI                       BIT_25  #define DT_IIDMA                        BIT_26  #define DT_FWI2                         BIT_27  #define DT_ZIO_SUPPORTED                BIT_28 @@ -2572,6 +2573,7 @@ struct qla_hw_data {  #define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha))  #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) +#define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)  #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)  #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)  #define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 691783abfb6..aa69486dc06 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -537,6 +537,11 @@ struct sts_entry_24xx {  	/*  	 * If DIF Error is set in comp_status, these additional fields are  	 * defined: +	 * +	 * !!! NOTE: Firmware sends expected/actual DIF data in big endian +	 * format; but all of the "data" field gets swab32-d in the beginning +	 * of qla2x00_status_entry(). +	 *  	 * &data[10] : uint8_t report_runt_bg[2];	- computed guard  	 * &data[12] : uint8_t actual_dif[8];		- DIF Data received  	 * &data[20] : uint8_t expected_dif[8];		- DIF Data computed diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index def694271bf..37da04d3db2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)  		req = vha->req;  	rsp = req->rsp; -	atomic_set(&vha->loop_state, LOOP_UPDATE);  	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);  	if (vha->flags.online) {  		if (!(rval = qla2x00_fw_ready(vha))) {  			/* Wait at most MAX_TARGET RSCNs for a stable link. */  			wait_time = 256;  			do { -				atomic_set(&vha->loop_state, LOOP_UPDATE); -  				/* Issue a marker after FW becomes ready. */  				qla2x00_marker(vha, req, rsp, 0, 0,  					MK_SYNC_ALL); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index d2e904bc21c..9902834e0b7 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)  		    fcport->d_id.b.al_pa);  	}  } + +static inline int +qla2x00_hba_err_chk_enabled(srb_t *sp) +{ +	/* +	 * Uncomment when corresponding SCSI changes are done. 
+	 * +	if (!sp->cmd->prot_chk) +		return 0; +	 * +	 */ + +	switch (scsi_get_prot_op(sp->cmd)) { +	case SCSI_PROT_READ_STRIP: +	case SCSI_PROT_WRITE_INSERT: +		if (ql2xenablehba_err_chk >= 1) +			return 1; +		break; +	case SCSI_PROT_READ_PASS: +	case SCSI_PROT_WRITE_PASS: +		if (ql2xenablehba_err_chk >= 2) +			return 1; +		break; +	case SCSI_PROT_READ_INSERT: +	case SCSI_PROT_WRITE_STRIP: +		return 1; +	} +	return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 49d6906af88..dbec89622a0 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -709,20 +709,28 @@ struct fw_dif_context {   *   */  static inline void -qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, +qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,      unsigned int protcnt)  { -	struct sd_dif_tuple *spt; +	struct scsi_cmnd *cmd = sp->cmd;  	scsi_qla_host_t *vha = shost_priv(cmd->device->host); -	unsigned char op = scsi_get_prot_op(cmd);  	switch (scsi_get_prot_type(cmd)) { -	/* For TYPE 0 protection: no checking */  	case SCSI_PROT_DIF_TYPE0: -		pkt->ref_tag_mask[0] = 0x00; -		pkt->ref_tag_mask[1] = 0x00; -		pkt->ref_tag_mask[2] = 0x00; -		pkt->ref_tag_mask[3] = 0x00; +		/* +		 * No check for ql2xenablehba_err_chk, as it would be an +		 * I/O error if hba tag generation is not done. +		 */ +		pkt->ref_tag = cpu_to_le32((uint32_t) +		    (0xffffffff & scsi_get_lba(cmd))); + +		if (!qla2x00_hba_err_chk_enabled(sp)) +			break; + +		pkt->ref_tag_mask[0] = 0xff; +		pkt->ref_tag_mask[1] = 0xff; +		pkt->ref_tag_mask[2] = 0xff; +		pkt->ref_tag_mask[3] = 0xff;  		break;  	/* @@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	 * match LBA in CDB + N  	 */  	case SCSI_PROT_DIF_TYPE2: -		if (!ql2xenablehba_err_chk) -			break; - -		if (scsi_prot_sg_count(cmd)) { -			spt = page_address(sg_page(scsi_prot_sglist(cmd))) + -			    scsi_prot_sglist(cmd)[0].offset; -			pkt->app_tag = swab32(spt->app_tag); -			pkt->app_tag_mask[0] =  0xff; -			pkt->app_tag_mask[1] =  0xff; -		} +		pkt->app_tag = __constant_cpu_to_le16(0); +		pkt->app_tag_mask[0] = 0x0; +		pkt->app_tag_mask[1] = 0x0;  		pkt->ref_tag = cpu_to_le32((uint32_t)  		    (0xffffffff & scsi_get_lba(cmd))); +		if (!qla2x00_hba_err_chk_enabled(sp)) +			break; +  		/* enable ALL bytes of the ref tag */  		pkt->ref_tag_mask[0] = 0xff;  		pkt->ref_tag_mask[1] = 0xff; @@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	 * 16 bit app tag.  	 
*/  	case SCSI_PROT_DIF_TYPE1: -		if (!ql2xenablehba_err_chk) +		pkt->ref_tag = cpu_to_le32((uint32_t) +		    (0xffffffff & scsi_get_lba(cmd))); +		pkt->app_tag = __constant_cpu_to_le16(0); +		pkt->app_tag_mask[0] = 0x0; +		pkt->app_tag_mask[1] = 0x0; + +		if (!qla2x00_hba_err_chk_enabled(sp))  			break; -		if (protcnt && (op == SCSI_PROT_WRITE_STRIP || -		    op == SCSI_PROT_WRITE_PASS)) { -			spt = page_address(sg_page(scsi_prot_sglist(cmd))) + -			    scsi_prot_sglist(cmd)[0].offset; -			ql_dbg(ql_dbg_io, vha, 0x3008, -			    "LBA from user %p, lba = 0x%x for cmd=%p.\n", -			    spt, (int)spt->ref_tag, cmd); -			pkt->ref_tag = swab32(spt->ref_tag); -			pkt->app_tag_mask[0] = 0x0; -			pkt->app_tag_mask[1] = 0x0; -		} else { -			pkt->ref_tag = cpu_to_le32((uint32_t) -			    (0xffffffff & scsi_get_lba(cmd))); -			pkt->app_tag = __constant_cpu_to_le16(0); -			pkt->app_tag_mask[0] = 0x0; -			pkt->app_tag_mask[1] = 0x0; -		}  		/* enable ALL bytes of the ref tag */  		pkt->ref_tag_mask[0] = 0xff;  		pkt->ref_tag_mask[1] = 0xff; @@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,  	    scsi_get_prot_type(cmd), cmd);  } +struct qla2_sgx { +	dma_addr_t		dma_addr;	/* OUT */ +	uint32_t		dma_len;	/* OUT */ + +	uint32_t		tot_bytes;	/* IN */ +	struct scatterlist	*cur_sg;	/* IN */ + +	/* for book keeping, bzero on initial invocation */ +	uint32_t		bytes_consumed; +	uint32_t		num_bytes; +	uint32_t		tot_partial; + +	/* for debugging */ +	uint32_t		num_sg; +	srb_t			*sp; +};  static int +qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, +	uint32_t *partial) +{ +	struct scatterlist *sg; +	uint32_t cumulative_partial, sg_len; +	dma_addr_t sg_dma_addr; + +	if (sgx->num_bytes == sgx->tot_bytes) +		return 0; + +	sg = sgx->cur_sg; +	cumulative_partial = sgx->tot_partial; + +	sg_dma_addr = sg_dma_address(sg); +	sg_len = sg_dma_len(sg); + +	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; + +	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { +		sgx->dma_len = (blk_sz - cumulative_partial); +		sgx->tot_partial = 0; +		sgx->num_bytes += blk_sz; +		*partial = 0; +	} else { +		sgx->dma_len = sg_len - sgx->bytes_consumed; +		sgx->tot_partial += sgx->dma_len; +		*partial = 1; +	} + +	sgx->bytes_consumed += sgx->dma_len; + +	if (sg_len == sgx->bytes_consumed) { +		sg = sg_next(sg); +		sgx->num_sg++; +		sgx->cur_sg = sg; +		sgx->bytes_consumed = 0; +	} + +	return 1; +} + +static int +qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, +	uint32_t *dsd, uint16_t tot_dsds) +{ +	void *next_dsd; +	uint8_t avail_dsds = 0; +	uint32_t dsd_list_len; +	struct dsd_dma *dsd_ptr; +	struct scatterlist *sg_prot; +	uint32_t *cur_dsd = dsd; +	uint16_t	used_dsds = tot_dsds; + +	uint32_t	prot_int; +	uint32_t	partial; +	struct qla2_sgx sgx; +	dma_addr_t	sle_dma; +	uint32_t	sle_dma_len, tot_prot_dma_len = 0; +	struct scsi_cmnd *cmd = sp->cmd; + +	prot_int = cmd->device->sector_size; + +	memset(&sgx, 0, sizeof(struct qla2_sgx)); +	sgx.tot_bytes = scsi_bufflen(sp->cmd); +	sgx.cur_sg = scsi_sglist(sp->cmd); +	sgx.sp = sp; + +	sg_prot = scsi_prot_sglist(sp->cmd); + +	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { + +		sle_dma = sgx.dma_addr; +		sle_dma_len = sgx.dma_len; +alloc_and_fill: +		/* Allocate additional continuation packets? */ +		if (avail_dsds == 0) { +			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
+					QLA_DSDS_PER_IOCB : used_dsds; +			dsd_list_len = (avail_dsds + 1) * 12; +			used_dsds -= avail_dsds; + +			/* allocate tracking DS */ +			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); +			if (!dsd_ptr) +				return 1; + +			/* allocate new list */ +			dsd_ptr->dsd_addr = next_dsd = +			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, +				&dsd_ptr->dsd_list_dma); + +			if (!next_dsd) { +				/* +				 * Need to cleanup only this dsd_ptr, rest +				 * will be done by sp_free_dma() +				 */ +				kfree(dsd_ptr); +				return 1; +			} + +			list_add_tail(&dsd_ptr->list, +			    &((struct crc_context *)sp->ctx)->dsd_list); + +			sp->flags |= SRB_CRC_CTX_DSD_VALID; + +			/* add new list to cmd iocb or last list */ +			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); +			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); +			*cur_dsd++ = dsd_list_len; +			cur_dsd = (uint32_t *)next_dsd; +		} +		*cur_dsd++ = cpu_to_le32(LSD(sle_dma)); +		*cur_dsd++ = cpu_to_le32(MSD(sle_dma)); +		*cur_dsd++ = cpu_to_le32(sle_dma_len); +		avail_dsds--; + +		if (partial == 0) { +			/* Got a full protection interval */ +			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; +			sle_dma_len = 8; + +			tot_prot_dma_len += sle_dma_len; +			if (tot_prot_dma_len == sg_dma_len(sg_prot)) { +				tot_prot_dma_len = 0; +				sg_prot = sg_next(sg_prot); +			} + +			partial = 1; /* So as to not re-enter this block */ +			goto alloc_and_fill; +		} +	} +	/* Null termination */ +	*cur_dsd++ = 0; +	*cur_dsd++ = 0; +	*cur_dsd++ = 0; +	return 0; +} +static int  qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,  	uint16_t tot_dsds)  { @@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	struct scsi_cmnd	*cmd;  	struct scatterlist	*cur_seg;  	int			sgc; -	uint32_t		total_bytes; +	uint32_t		total_bytes = 0;  	uint32_t		data_bytes;  	uint32_t		dif_bytes;  	uint8_t			bundling = 1; @@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  		    __constant_cpu_to_le16(CF_READ_DATA);  	} -	tot_prot_dsds = scsi_prot_sg_count(cmd); -	if (!tot_prot_dsds) +	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || +	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))  		bundling = 0;  	/* Allocate CRC context from global pool */ @@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); -	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) +	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)  	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);  	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); @@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  		fcp_cmnd->additional_cdb_len |= 2;  	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); -	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));  	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);  	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);  	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( @@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */  	/* Compute dif len and adjust data len to incude protection */ -	total_bytes = data_bytes;  	dif_bytes = 0;  	blk_size = 
cmd->device->sector_size; -	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { -		dif_bytes = (data_bytes / blk_size) * 8; -		total_bytes += dif_bytes; +	dif_bytes = (data_bytes / blk_size) * 8; + +	switch (scsi_get_prot_op(sp->cmd)) { +	case SCSI_PROT_READ_INSERT: +	case SCSI_PROT_WRITE_STRIP: +	    total_bytes = data_bytes; +	    data_bytes += dif_bytes; +	    break; + +	case SCSI_PROT_READ_STRIP: +	case SCSI_PROT_WRITE_INSERT: +	case SCSI_PROT_READ_PASS: +	case SCSI_PROT_WRITE_PASS: +	    total_bytes = data_bytes + dif_bytes; +	    break; +	default: +	    BUG();  	} -	if (!ql2xenablehba_err_chk) +	if (!qla2x00_hba_err_chk_enabled(sp))  		fw_prot_opts |= 0x10; /* Disable Guard tag checking */  	if (!bundling) { @@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	cmd_pkt->control_flags |=  	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); -	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, + +	if (!bundling && tot_prot_dsds) { +		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, +		    cur_dsd, tot_dsds)) +			goto crc_queuing_error; +	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,  	    (tot_dsds - tot_prot_dsds)))  		goto crc_queuing_error; @@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)  			goto queuing_error;  		else  			sp->flags |= SRB_DMA_VALID; + +		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || +		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { +			struct qla2_sgx sgx; +			uint32_t	partial; + +			memset(&sgx, 0, sizeof(struct qla2_sgx)); +			sgx.tot_bytes = scsi_bufflen(cmd); +			sgx.cur_sg = scsi_sglist(cmd); +			sgx.sp = sp; + +			nseg = 0; +			while (qla24xx_get_one_block_sg( +			    cmd->device->sector_size, &sgx, &partial)) +				nseg++; +		}  	} else  		nseg = 0; @@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)  			goto queuing_error;  		else  			sp->flags |= SRB_CRC_PROT_DMA_VALID; + +		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || +		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { +			nseg = scsi_bufflen(cmd) / cmd->device->sector_size; +		}  	} else {  		nseg = 0;  	} @@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)  	/* Build header part of command packet (excluding the OPCODE). */  	req->current_outstanding_cmd = handle;  	req->outstanding_cmds[handle] = sp; +	sp->handle = handle;  	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;  	req->cnt -= req_cnt; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b16b7725dee..646fc5263d5 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -719,7 +719,6 @@ skip_rio:  			vha->flags.rscn_queue_overflow = 1;  		} -		atomic_set(&vha->loop_state, LOOP_UPDATE);  		atomic_set(&vha->loop_down_timer, 0);  		vha->flags.management_server_logged_in = 0; @@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {   * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST   * to indicate to the kernel that the HBA detected error.   
*/ -static inline void +static inline int  qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  {  	struct scsi_qla_host *vha = sp->fcport->vha;  	struct scsi_cmnd *cmd = sp->cmd; -	struct scsi_dif_tuple	*ep = -			(struct scsi_dif_tuple *)&sts24->data[20]; -	struct scsi_dif_tuple	*ap = -			(struct scsi_dif_tuple *)&sts24->data[12]; +	uint8_t		*ap = &sts24->data[12]; +	uint8_t		*ep = &sts24->data[20];  	uint32_t	e_ref_tag, a_ref_tag;  	uint16_t	e_app_tag, a_app_tag;  	uint16_t	e_guard, a_guard; -	e_ref_tag = be32_to_cpu(ep->ref_tag); -	a_ref_tag = be32_to_cpu(ap->ref_tag); -	e_app_tag = be16_to_cpu(ep->app_tag); -	a_app_tag = be16_to_cpu(ap->app_tag); -	e_guard = be16_to_cpu(ep->guard); -	a_guard = be16_to_cpu(ap->guard); +	/* +	 * swab32 of the "data" field in the beginning of qla2x00_status_entry() +	 * would make guard field appear at offset 2 +	 */ +	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2)); +	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); +	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); +	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2)); +	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); +	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));  	ql_dbg(ql_dbg_io, vha, 0x3023,  	    "iocb(s) %p Returned STATUS.\n", sts24); @@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,  	    a_app_tag, e_app_tag, a_guard, e_guard); +	/* +	 * Ignore sector if: +	 * For type     3: ref & app tag is all 'f's +	 * For type 0,1,2: app tag is all 'f's +	 */ +	if ((a_app_tag == 0xffff) && +	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || +	     (a_ref_tag == 0xffffffff))) { +		uint32_t blocks_done, resid; +		sector_t lba_s = scsi_get_lba(cmd); + +		/* 2TB boundary case covered automatically with this */ +		blocks_done = e_ref_tag - (uint32_t)lba_s + 1; + +		resid = scsi_bufflen(cmd) - (blocks_done * +		    cmd->device->sector_size); + +		scsi_set_resid(cmd, resid); +		cmd->result = DID_OK << 16; + +		/* Update protection tag */ +		if (scsi_prot_sg_count(cmd)) { +			uint32_t i, j = 0, k = 0, num_ent; +			struct scatterlist *sg; +			struct sd_dif_tuple *spt; + +			/* Patch the corresponding protection tags */ +			scsi_for_each_prot_sg(cmd, sg, +			    scsi_prot_sg_count(cmd), i) { +				num_ent = sg_dma_len(sg) / 8; +				if (k + num_ent < blocks_done) { +					k += num_ent; +					continue; +				} +				j = blocks_done - k - 1; +				k = blocks_done; +				break; +			} + +			if (k != blocks_done) { +				qla_printk(KERN_WARNING, sp->fcport->vha->hw, +				    "unexpected tag values tag:lba=%x:%lx)\n", +				    e_ref_tag, lba_s); +				return 1; +			} + +			spt = page_address(sg_page(sg)) + sg->offset; +			spt += j; + +			spt->app_tag = 0xffff; +			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) +				spt->ref_tag = 0xffffffff; +		} + +		return 0; +	} +  	/* check guard */  	if (e_guard != a_guard) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, @@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} -	/* check appl tag */ -	if (e_app_tag != a_app_tag) { +	/* check ref tag */ +	if (e_ref_tag != a_ref_tag) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, -		    0x10, 0x2); +		    0x10, 0x3);  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		
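The qla_isr.c change above skips sectors the firmware flags with an all-ones application tag (and, for DIF type 3, an all-ones reference tag): it derives the number of blocks accounted for from the expected ref tag and sets the SCSI residual from what remains. That arithmetic is easy to lose in the diff, so here is a minimal userspace sketch of just the calculation; dif_residual() and the numbers in main() are made up for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the "ignore sector" arithmetic in the hunk above: the
 * expected ref tag counts up from the starting LBA, so
 * (e_ref_tag - lba + 1) is the number of blocks accounted for and
 * the rest of the buffer becomes the residual.
 */
static uint32_t dif_residual(uint32_t e_ref_tag, uint32_t lba,
			     uint32_t bufflen, uint32_t sector_size)
{
	uint32_t blocks_done = e_ref_tag - lba + 1;

	return bufflen - blocks_done * sector_size;
}

int main(void)
{
	/* 8 sectors of 512 bytes starting at LBA 100; the status IOCB
	 * reports an expected ref tag of 102, i.e. 3 blocks accounted for,
	 * leaving 2560 bytes of residual. */
	printf("resid = %u bytes\n",
	       (unsigned)dif_residual(102, 100, 8 * 512, 512));
	return 0;
}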
cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} -	/* check ref tag */ -	if (e_ref_tag != a_ref_tag) { +	/* check appl tag */ +	if (e_app_tag != a_app_tag) {  		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, -		    0x10, 0x3); +		    0x10, 0x2);  		set_driver_byte(cmd, DRIVER_SENSE);  		set_host_byte(cmd, DID_ABORT);  		cmd->result |= SAM_STAT_CHECK_CONDITION << 1; -		return; +		return 1;  	} + +	return 1;  }  /** @@ -1767,7 +1827,7 @@ check_scsi_status:  		break;  	case CS_DIF_ERROR: -		qla2x00_handle_dif_error(sp, sts24); +		logit = qla2x00_handle_dif_error(sp, sts24);  		break;  	default:  		cp->result = DID_ERROR << 16; @@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)  		goto skip_msi;  	} -	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || -		!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { +	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {  		ql_log(ql_log_warn, vha, 0x0035,  		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", -		    ha->pdev->revision, ha->fw_attributes); +		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);  		goto skip_msix;  	} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c706ed37000..f488cc69fc7 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)  	host->can_queue = base_vha->req->length + 128;  	host->this_id = 255;  	host->cmd_per_lun = 3; -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)  		host->max_cmd_len = 32;  	else  		host->max_cmd_len = MAX_CMDSZ; diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5cbf33a50b1..049807cda41 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	struct qla_hw_data *ha;  	struct rsp_que *rsp;  	struct device_reg_82xx __iomem *reg; +	unsigned long flags;  	rsp = (struct rsp_que *) dev_id;  	if (!rsp) { @@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	ha = rsp->hw;  	reg = &ha->iobase->isp82; -	spin_lock_irq(&ha->hardware_lock); +	spin_lock_irqsave(&ha->hardware_lock, flags);  	vha = pci_get_drvdata(ha->pdev);  	qla24xx_process_response_queue(vha, rsp);  	WRT_REG_DWORD(®->host_int, 0); -	spin_unlock_irq(&ha->hardware_lock); +	spin_unlock_irqrestore(&ha->hardware_lock, flags);  	return IRQ_HANDLED;  } @@ -2838,6 +2839,16 @@ sufficient_dsds:  		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);  		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); +		/* build FCP_CMND IU */ +		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); +		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); +		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + +		if (cmd->sc_data_direction == DMA_TO_DEVICE) +			ctx->fcp_cmnd->additional_cdb_len |= 1; +		else if (cmd->sc_data_direction == DMA_FROM_DEVICE) +			ctx->fcp_cmnd->additional_cdb_len |= 2; +  		/*  		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).  		 
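/*
 * Illustration, not part of the commit: the guard/ref/app checks above all
 * report ILLEGAL REQUEST with ASC 0x10, only the ASCQ differs (0x1 guard,
 * 0x2 application tag, 0x3 reference tag).  The mapping in isolation, with
 * illustrative names:
 */
#include <stdint.h>

enum dif_check { DIF_GUARD_MISCOMPARE, DIF_APP_TAG_MISCOMPARE, DIF_REF_TAG_MISCOMPARE };

struct sense_triplet { uint8_t key, asc, ascq; };

struct sense_triplet dif_sense(enum dif_check c)
{
	static const uint8_t ascq_for[] = { 0x01, 0x02, 0x03 };
	struct sense_triplet s = { 0x05 /* ILLEGAL REQUEST */, 0x10, ascq_for[c] };

	return s;
}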
*/ @@ -2854,16 +2865,6 @@ sufficient_dsds:  			}  		} -		/* build FCP_CMND IU */ -		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); -		int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); -		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; - -		if (cmd->sc_data_direction == DMA_TO_DEVICE) -			ctx->fcp_cmnd->additional_cdb_len |= 1; -		else if (cmd->sc_data_direction == DMA_FROM_DEVICE) -			ctx->fcp_cmnd->additional_cdb_len |= 2; -  		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);  		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e02df276804..4cace3f20c0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,  		"Maximum queue depth to report for target devices.");  /* Do not change the value of this after module load */ -int ql2xenabledif = 1; +int ql2xenabledif = 0;  module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xenabledif,  		" Enable T10-CRC-DIF " -		" Default is 0 - No DIF Support. 1 - Enable it"); +		" Default is 0 - No DIF Support. 1 - Enable it" +		", 2 - Enable DIF for all types, except Type 0."); -int ql2xenablehba_err_chk; +int ql2xenablehba_err_chk = 2;  module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xenablehba_err_chk, -		" Enable T10-CRC-DIF Error isolation by HBA" -		" Default is 0 - Error isolation disabled, 1 - Enable it"); +		" Enable T10-CRC-DIF Error isolation by HBA:\n" +		" Default is 1.\n" +		"  0 -- Error isolation disabled\n" +		"  1 -- Error isolation enabled only for DIX Type 0\n" +		"  2 -- Error isolation enabled for all Types\n");  int ql2xiidmaenable=1;  module_param(ql2xiidmaenable, int, S_IRUGO); @@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)  		    "Abort command mbx success.\n");  		wait = 1;  	} + +	spin_lock_irqsave(&ha->hardware_lock, flags);  	qla2x00_sp_compl(ha, sp); +	spin_unlock_irqrestore(&ha->hardware_lock, flags); + +	/* Did the command return during mailbox execution? */ +	if (ret == FAILED && !CMD_SP(cmd)) +		ret = SUCCESS;  	/* Wait for the command to be returned. 
*/  	if (wait) { @@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	host->this_id = 255;  	host->cmd_per_lun = 3;  	host->unique_id = host->host_no; -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)  		host->max_cmd_len = 32;  	else  		host->max_cmd_len = MAX_CMDSZ; @@ -2378,13 +2389,16 @@ skip_dpc:  	    "Detected hba at address=%p.\n",  	    ha); -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { +	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {  		if (ha->fw_attributes & BIT_4) { +			int prot = 0;  			base_vha->flags.difdix_supported = 1;  			ql_dbg(ql_dbg_init, base_vha, 0x00f1,  			    "Registering for DIF/DIX type 1 and 3 protection.\n"); +			if (ql2xenabledif == 1) +				prot = SHOST_DIX_TYPE0_PROTECTION;  			scsi_host_set_prot(host, -			    SHOST_DIF_TYPE1_PROTECTION +			    prot | SHOST_DIF_TYPE1_PROTECTION  			    | SHOST_DIF_TYPE2_PROTECTION  			    | SHOST_DIF_TYPE3_PROTECTION  			    | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 062c97bf62f..13b6357c1fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@  /*   * Driver version   */ -#define QLA2XXX_VERSION      "8.03.07.03-k" +#define QLA2XXX_VERSION      "8.03.07.07-k"  #define QLA_DRIVER_MAJOR_VER	8  #define QLA_DRIVER_MINOR_VER	3 diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 6859af0778c..7611def97d0 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,  				   struct comedi_insn *insn,  				   unsigned int *data);  static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); -#ifdef CONFIG_COMEDI_PCI +#ifdef CONFIG_ISA_DMA_API  static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); +#endif +#ifdef CONFIG_COMEDI_PCI  static int labpc_find_device(struct comedi_device *dev, int bus, int slot);  #endif  static int labpc_dio_mem_callback(int dir, int port, int data, diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index a3f5162bfed..462fbc20561 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,  	int ret = 0;  	BUG_ON(!is_ephemeral(pool)); -	zbud_decompress(virt_to_page(data), pampd); +	zbud_decompress((struct page *)(data), pampd);  	zbud_free_and_delist((struct zbud_hdr *)pampd);  	atomic_dec(&zcache_curr_eph_pampd_count);  	return ret; diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 497b2e718a7..5b773160200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(  	u8 DataSequenceInOrder = 0;  	u8 ErrorRecoveryLevel = 0, SessionType = 0;  	u8 IFMarker = 0, OFMarker = 0; -	u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; +	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;  	u32 FirstBurstLength = 0, MaxBurstLength = 0;  	struct iscsi_param *param = NULL; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a0d23bc0fc9..f00137f377b 100644 --- 
a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -875,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)  }  /* - *	Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker - *	array counts needed for sync and steering. - */ -static int iscsit_determine_sync_and_steering_counts( -	struct iscsi_conn *conn, -	struct iscsi_data_count *count) -{ -	u32 length = count->data_length; -	u32 marker, markint; - -	count->sync_and_steering = 1; - -	marker = (count->type == ISCSI_RX_DATA) ? -			conn->of_marker : conn->if_marker; -	markint = (count->type == ISCSI_RX_DATA) ? -			(conn->conn_ops->OFMarkInt * 4) : -			(conn->conn_ops->IFMarkInt * 4); -	count->ss_iov_count = count->iov_count; - -	while (length > 0) { -		if (length >= marker) { -			count->ss_iov_count += 3; -			count->ss_marker_count += 2; - -			length -= marker; -			marker = markint; -		} else -			length = 0; -	} - -	return 0; -} - -/*   *	Setup conn->if_marker and conn->of_marker values based upon   *	the initial marker-less interval. (see iSCSI v19 A.2)   */ @@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(  	struct kvec iov;  	u32 tx_hdr_size, data_len;  	u32 offset = cmd->first_data_sg_off; -	int tx_sent; +	int tx_sent, iov_off;  send_hdr:  	tx_hdr_size = ISCSI_HDR_LEN; @@ -1310,9 +1276,19 @@ send_hdr:  	}  	data_len = cmd->tx_size - tx_hdr_size - cmd->padding; -	if (conn->conn_ops->DataDigest) +	/* +	 * Set iov_off used by padding and data digest tx_data() calls below +	 * in order to determine proper offset into cmd->iov_data[] +	 */ +	if (conn->conn_ops->DataDigest) {  		data_len -= ISCSI_CRC_LEN; - +		if (cmd->padding) +			iov_off = (cmd->iov_data_count - 2); +		else +			iov_off = (cmd->iov_data_count - 1); +	} else { +		iov_off = (cmd->iov_data_count - 1); +	}  	/*  	 * Perform sendpage() for each page in the scatterlist  	 */ @@ -1341,8 +1317,7 @@ send_pg:  send_padding:  	if (cmd->padding) { -		struct kvec *iov_p = -			&cmd->iov_data[cmd->iov_data_count-1]; +		struct kvec *iov_p = &cmd->iov_data[iov_off++];  		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);  		if (cmd->padding != tx_sent) { @@ -1356,8 +1331,7 @@ send_padding:  send_datacrc:  	if (conn->conn_ops->DataDigest) { -		struct kvec *iov_d = -			&cmd->iov_data[cmd->iov_data_count]; +		struct kvec *iov_d = &cmd->iov_data[iov_off];  		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);  		if (ISCSI_CRC_LEN != tx_sent) { @@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; -	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *rx_marker, old_rx_marker = 0; -		struct kvec *iov_record; - -		memset(&rx_marker_val, 0, -				count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		rx_marker = &conn->of_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("rx_data: #1 orig_iov_len %u," -			" orig_iov_loc 
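/*
 * Illustration, not part of the commit: in the iscsit_fe_sendpage_sg() hunk
 * above, cmd->iov_data[] always ends with an optional padding kvec followed
 * by an optional data-digest kvec, so the new iov_off is derived purely from
 * iov_data_count.  The same index bookkeeping in isolation (names are
 * illustrative):
 */
void trailing_iov_indices(int iov_count, int has_padding, int has_digest,
			  int *pad_idx, int *digest_idx)
{
	int idx = iov_count;

	*digest_idx = has_digest  ? --idx : -1;	/* digest, when present, is last      */
	*pad_idx    = has_padding ? --idx : -1;	/* padding sits immediately before it */
}
/* e.g. 4 data kvecs + padding + digest: pad_idx == 4, digest_idx == 5 */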
%u\n", orig_iov_len, orig_iov_loc); -			pr_debug("rx_data: #2 rx_marker %u, size" -				" %u\n", *rx_marker, size); - -			if (orig_iov_len >= *rx_marker) { -				iov[iov_count].iov_len = *rx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				old_rx_marker = *rx_marker; - -				/* -				 * OFMarkInt is in 32-bit words. -				 */ -				*rx_marker = (conn->conn_ops->OFMarkInt * 4); -				size -= old_rx_marker; -				orig_iov_len -= old_rx_marker; -				per_iov_bytes += old_rx_marker; - -				pr_debug("rx_data: #3 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*rx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("rx_data: #4 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} -		} -		data += (rx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p	= &iov[0]; -		iov_len	= iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (rx_marker_iov > count->ss_marker_count) { -			pr_err("rx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", rx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len	= count->iov_count; -	} +	iov_p = count->iov; +	iov_len	= count->iov_count;  	while (total_rx < data) {  		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, @@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(  				rx_loop, total_rx, data);  	} -	if (count->sync_and_steering) { -		int j; -		for (j = 0; j < rx_marker_iov; j++) { -			pr_debug("rx_data: #5 j: %d, offset: %d\n", -				j, rx_marker_val[j]); -			conn->of_marker_offset = rx_marker_val[j]; -		} -		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); -	} -  	return total_rx;  } @@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; -	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *tx_marker, old_tx_marker = 0; -		struct kvec *iov_record; - -		memset(&tx_marker_val, 0, -			count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		tx_marker = &conn->if_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("tx_data: #1 orig_iov_len %u," -			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); -			pr_debug("tx_data: #2 tx_marker %u, size" -				" %u\n", *tx_marker, size); - -			if (orig_iov_len >= 
*tx_marker) { -				iov[iov_count].iov_len = *tx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				tx_marker_val[tx_marker_iov] = -						(size - *tx_marker); -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				old_tx_marker = *tx_marker; - -				/* -				 * IFMarkInt is in 32-bit words. -				 */ -				*tx_marker = (conn->conn_ops->IFMarkInt * 4); -				size -= old_tx_marker; -				orig_iov_len -= old_tx_marker; -				per_iov_bytes += old_tx_marker; - -				pr_debug("tx_data: #3 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -				pr_debug("tx_data: #4 offset %u\n", -					tx_marker_val[tx_marker_iov-1]); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base -					= (iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*tx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("tx_data: #5 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -			} -		} - -		data += (tx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p = &iov[0]; -		iov_len = iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (tx_marker_iov > count->ss_marker_count) { -			pr_err("tx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", tx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len = count->iov_count; -	} +	iov_p = count->iov; +	iov_len = count->iov_count;  	while (total_tx < data) {  		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, @@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(  					tx_loop, total_tx, data);  	} -	if (count->sync_and_steering) -		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); -  	return total_tx;  } @@ -1702,12 +1486,6 @@ int rx_data(  	c.data_length = data;  	c.type = ISCSI_RX_DATA; -	if (conn->conn_ops->OFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_rx_data(conn, &c);  } @@ -1728,12 +1506,6 @@ int tx_data(  	c.data_length = data;  	c.type = ISCSI_TX_DATA; -	if (conn->conn_ops->IFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_tx_data(conn, &c);  } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 89ae923c5da..f04d4ef99dc 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,6 +24,7 @@   */  #include <linux/kernel.h> +#include <linux/ctype.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h> @@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)  	return 0;  } +static void +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) +{ +	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; +	unsigned char *buf = buf_off; +	int cnt = 0, next = 1; +	/* +	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on +	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field +	 * format, followed 
by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION +	 * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL +	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure +	 * per device uniqeness. +	 */ +	while (*p != '\0') { +		if (cnt >= 13) +			break; +		if (!isxdigit(*p)) { +			p++; +			continue; +		} +		if (next != 0) { +			buf[cnt++] |= hex_to_bin(*p++); +			next = 0; +		} else { +			buf[cnt] = hex_to_bin(*p++) << 4; +			next = 1; +		} +	} +} +  /*   * Device identification VPD, for a complete list of   * DESIGNATOR TYPEs see spc4r17 Table 459. @@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)  	 * VENDOR_SPECIFIC_IDENTIFIER and  	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION  	 */ -	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); -	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); +	target_parse_naa_6h_vendor_specific(dev, &buf[off]);  	len = 20;  	off = (len + 4); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 8d0c58ea631..a4b0a8d27f2 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)  {  	struct se_device *dev = container_of(work, struct se_device,  					qf_work_queue); +	LIST_HEAD(qf_cmd_list);  	struct se_cmd *cmd, *cmd_tmp;  	spin_lock_irq(&dev->qf_cmd_lock); -	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { +	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); +	spin_unlock_irq(&dev->qf_cmd_lock); +	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {  		list_del(&cmd->se_qf_node);  		atomic_dec(&dev->dev_qf_count);  		smp_mb__after_atomic_dec(); -		spin_unlock_irq(&dev->qf_cmd_lock);  		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"  			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)  		 * has been added to head of queue  		 */  		transport_add_cmd_to_queue(cmd, cmd->t_state); - -		spin_lock_irq(&dev->qf_cmd_lock);  	} -	spin_unlock_irq(&dev->qf_cmd_lock);  }  unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index bd4fe21a23b..3749d8b4b42 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -98,8 +98,7 @@ struct ft_tpg {  	struct list_head list;		/* linkage in ft_lport_acl tpg_list */  	struct list_head lun_list;	/* head of LUNs */  	struct se_portal_group se_tpg; -	struct task_struct *thread;	/* processing thread */ -	struct se_queue_obj qobj;	/* queue for processing thread */ +	struct workqueue_struct *workqueue;  };  struct ft_lport_acl { @@ -110,16 +109,10 @@ struct ft_lport_acl {  	struct se_wwn fc_lport_wwn;  }; -enum ft_cmd_state { -	FC_CMD_ST_NEW = 0, -	FC_CMD_ST_REJ -}; -  /*   * Commands   */  struct ft_cmd { -	enum ft_cmd_state state;  	u32 lun;                        /* LUN from request */  	struct ft_sess *sess;		/* session held for cmd */  	struct fc_seq *seq;		/* sequence in exchange mgr */ @@ -127,7 +120,7 @@ struct ft_cmd {  	struct fc_frame *req_frame;  	unsigned char *cdb;		/* pointer to CDB inside frame */  	u32 write_data_len;		/* data received on writes */ -	struct se_queue_req se_req; +	struct work_struct work;  	/* Local sense buffer */  	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];  	u32 was_ddp_setup:1;		/* Set only if ddp is 
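/*
 * Illustration, not part of the commit: the helper added above packs the
 * ASCII-hex unit serial into the NAA IEEE Registered Extended designator,
 * starting with the LOW nibble of the first byte (its high nibble already
 * holds part of the NAA 6h header), skipping non-hex characters and stopping
 * after 13 bytes (100 bits).  A standalone userspace equivalent, with
 * hex_val() standing in for the kernel's hex_to_bin() and buf[] assumed
 * pre-zeroed apart from that first high nibble:
 */
#include <ctype.h>

static int hex_val(char c)
{
	return isdigit((unsigned char)c) ? c - '0' : tolower((unsigned char)c) - 'a' + 10;
}

void pack_hex_low_nibble_first(const char *serial, unsigned char *buf, int max_bytes)
{
	int cnt = 0, low = 1;

	for (; *serial && cnt < max_bytes; serial++) {
		if (!isxdigit((unsigned char)*serial))
			continue;		/* e.g. skip dashes in the serial */
		if (low) {
			buf[cnt++] |= hex_val(*serial);		/* fill low nibble, advance */
			low = 0;
		} else {
			buf[cnt] = hex_val(*serial) << 4;	/* start next byte high     */
			low = 1;
		}
	}
}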
setup */ @@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);  /*   * other internal functions.   */ -int ft_thread(void *);  void ft_recv_req(struct ft_sess *, struct fc_frame *);  struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);  struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5654dc22f7a..80fbcde00cb 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  	int count;  	se_cmd = &cmd->se_cmd; -	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", -		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); +	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", +		caller, cmd, cmd->sess, cmd->seq, se_cmd);  	pr_debug("%s: cmd %p cdb %p\n",  		caller, cmd, cmd->cdb);  	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); @@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);  } -static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) -{ -	struct ft_tpg *tpg = sess->tport->tpg; -	struct se_queue_obj *qobj = &tpg->qobj; -	unsigned long flags; - -	qobj = &sess->tport->tpg->qobj; -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); -	atomic_inc(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - -	wake_up_process(tpg->thread); -} - -static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) -{ -	unsigned long flags; -	struct se_queue_req *qr; - -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	if (list_empty(&qobj->qobj_list)) { -		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -		return NULL; -	} -	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); -	list_del(&qr->qr_list); -	atomic_dec(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -	return container_of(qr, struct ft_cmd, se_req); -} -  static void ft_free_cmd(struct ft_cmd *cmd)  {  	struct fc_frame *fp; @@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)  int ft_get_cmd_state(struct se_cmd *se_cmd)  { -	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - -	return cmd->state; +	return 0;  }  int ft_is_state_remove(struct se_cmd *se_cmd) @@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)  	return 0;  } +static void ft_send_work(struct work_struct *work); +  /*   * Handle incoming FCP command.   */ @@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)  		goto busy;  	}  	cmd->req_frame = fp;		/* hold frame during cmd */ -	ft_queue_cmd(sess, cmd); + +	INIT_WORK(&cmd->work, ft_send_work); +	queue_work(sess->tport->tpg->workqueue, &cmd->work);  	return;  busy: @@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)  /*   * Send new command to target.   */ -static void ft_send_cmd(struct ft_cmd *cmd) +static void ft_send_work(struct work_struct *work)  { +	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);  	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);  	struct se_cmd *se_cmd;  	struct fcp_cmnd *fcp; -	int data_dir; +	int data_dir = 0;  	u32 data_len;  	int task_attr;  	int ret; @@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)  err:  	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);  } - -/* - * Handle request in the command thread. 
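/*
 * Illustration, not part of the commit: with the per-tpg thread and queue
 * gone, each ft_cmd embeds its own work item; ft_recv_cmd() queues it and
 * ft_send_work() recovers the command with container_of(), as in the hunk
 * above.  The idiom in isolation, as a compilable userspace sketch (struct
 * names trimmed and illustrative):
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *); };

struct cmd {
	int lun;
	struct work work;	/* embedded, like ft_cmd::work */
};

static void cmd_handler(struct work *w)
{
	struct cmd *c = container_of(w, struct cmd, work);

	printf("handling cmd for lun %d\n", c->lun);
}

int main(void)
{
	struct cmd c = { .lun = 3, .work = { .fn = cmd_handler } };

	c.work.fn(&c.work);	/* schematically, what the queued work ends up doing */
	return 0;
}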
- */ -static void ft_exec_req(struct ft_cmd *cmd) -{ -	pr_debug("cmd state %x\n", cmd->state); -	switch (cmd->state) { -	case FC_CMD_ST_NEW: -		ft_send_cmd(cmd); -		break; -	default: -		break; -	} -} - -/* - * Processing thread. - * Currently one thread per tpg. - */ -int ft_thread(void *arg) -{ -	struct ft_tpg *tpg = arg; -	struct se_queue_obj *qobj = &tpg->qobj; -	struct ft_cmd *cmd; - -	while (!kthread_should_stop()) { -		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); -		if (kthread_should_stop()) -			goto out; - -		cmd = ft_dequeue_cmd(qobj); -		if (cmd) -			ft_exec_req(cmd); -	} - -out: -	return 0; -} diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index b15879d43e2..8fa39b74f22 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(  	tpg->index = index;  	tpg->lport_acl = lacl;  	INIT_LIST_HEAD(&tpg->lun_list); -	transport_init_queue_obj(&tpg->qobj);  	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,  				tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(  		return NULL;  	} -	tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); -	if (IS_ERR(tpg->thread)) { +	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); +	if (!tpg->workqueue) {  		kfree(tpg);  		return NULL;  	} @@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)  	pr_debug("del tpg %s\n",  		    config_item_name(&tpg->se_tpg.tpg_group.cg_item)); -	kthread_stop(tpg->thread); +	destroy_workqueue(tpg->workqueue);  	/* Wait for sessions to be freed thru RCU, for BUG_ON below */  	synchronize_rcu(); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index c37f4cd9645..d35ea5a3d56 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)  	if (cmd->was_ddp_setup) {  		BUG_ON(!ep);  		BUG_ON(!lport); -	} - -	/* -	 * Doesn't expect payload if DDP is setup. Payload -	 * is expected to be copied directly to user buffers -	 * due to DDP (Large Rx offload), -	 */ -	buf = fc_frame_payload_get(fp, 1); -	if (buf) -		pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " +		/* +		 * Since DDP (Large Rx offload) was setup for this request, +		 * payload is expected to be copied directly to user buffers. +		 */ +		buf = fc_frame_payload_get(fp, 1); +		if (buf) +			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "  				"cmd->sg_cnt 0x%x. DDP was setup"  				" hence not expected to receive frame with " -				"payload, Frame will be dropped if " -				"'Sequence Initiative' bit in f_ctl is " +				"payload, Frame will be dropped if" +				"'Sequence Initiative' bit in f_ctl is"  				"not set\n", __func__, ep->xid, f_ctl,  				cmd->sg, cmd->sg_cnt); -	/* - 	 * Invalidate HW DDP context if it was setup for respective - 	 * command. Invalidation of HW DDP context is requited in both - 	 * situation (success and error).  - 	 */ -	ft_invl_hw_context(cmd); +		/* +		 * Invalidate HW DDP context if it was setup for respective +		 * command. Invalidation of HW DDP context is requited in both +		 * situation (success and error). +		 */ +		ft_invl_hw_context(cmd); -	/* -	 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last -	 * write data frame is received successfully where payload is -	 * posted directly to user buffer and only the last frame's -	 * header is posted in receive queue. 
-	 * -	 * If "Sequence Initiative (TSI)" bit is not set, means error -	 * condition w.r.t. DDP, hence drop the packet and let explict -	 * ABORTS from other end of exchange timer trigger the recovery. -	 */ -	if (f_ctl & FC_FC_SEQ_INIT) -		goto last_frame; -	else -		goto drop; +		/* +		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last +		 * write data frame is received successfully where payload is +		 * posted directly to user buffer and only the last frame's +		 * header is posted in receive queue. +		 * +		 * If "Sequence Initiative (TSI)" bit is not set, means error +		 * condition w.r.t. DDP, hence drop the packet and let explict +		 * ABORTS from other end of exchange timer trigger the recovery. +		 */ +		if (f_ctl & FC_FC_SEQ_INIT) +			goto last_frame; +		else +			goto drop; +	}  	rel_off = ntohl(fh->fh_parm_offset);  	frame_len = fr_len(fp); diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 225123b37f1..58be715913c 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -4450,7 +4450,7 @@ static int __init rs_init(void)  #if defined(CONFIG_ETRAX_RS485)  #if defined(CONFIG_ETRAX_RS485_ON_PA) -	if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, +	if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,  			rs485_pa_bit)) {  		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "  			"RS485 pin\n"); @@ -4459,7 +4459,7 @@ static int __init rs_init(void)  	}  #endif  #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) -	if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, +	if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,  			rs485_port_g_bit)) {  		printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "  			"RS485 pin\n"); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1e96d1f1fe6..723f8231193 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)  	memset(buf, 0, retval);  	status = 0; -	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; +	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;  	spin_lock_irqsave(&xhci->lock, flags);  	/* For each port, did anything change?  If so, set that bit in buf. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 54139a2f06c..952e2ded61a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,  	int status = -EINPROGRESS;  	struct urb_priv *urb_priv;  	struct xhci_ep_ctx *ep_ctx; +	struct list_head *tmp;  	u32 trb_comp_code;  	int ret = 0; +	int td_num = 0;  	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));  	xdev = xhci->devs[slot_id]; @@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,  		return -ENODEV;  	} +	/* Count current td numbers if ep->skip is set */ +	if (ep->skip) { +		list_for_each(tmp, &ep_ring->td_list) +			td_num++; +	} +  	event_dma = le64_to_cpu(event->buffer);  	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));  	/* Look for common error cases */ @@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,  			goto cleanup;  		} +		/* We've skipped all the TDs on the ep ring when ep->skip set */ +		if (ep->skip && td_num == 0) { +			ep->skip = false; +			xhci_dbg(xhci, "All tds on the ep_ring skipped. 
" +						"Clear skip flag.\n"); +			ret = 0; +			goto cleanup; +		} +  		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); +		if (ep->skip) +			td_num--;  		/* Is this a TRB in the currently executing TD? */  		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 410fba45378..809cbda03d7 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,  		asminline_call(&cmn_regs, cru_rom_addr);  	die_nmi_called = 1;  	spin_unlock_irqrestore(&rom_lock, rom_pl); + +	if (allow_kdump) +		hpwdt_stop(); +  	if (!is_icru) {  		if (cmn_regs.u1.ral == 0) { -			printk(KERN_WARNING "hpwdt: An NMI occurred, " +			panic("An NMI occurred, "  				"but unable to determine source.\n");  		}  	} - -	if (allow_kdump) -		hpwdt_stop();  	panic("An NMI occurred, please see the Integrated "  		"Management Log for details.\n"); diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index 7d82adac1cb..102aed0efbf 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;  static void  ltq_wdt_enable(void)  { -	ltq_wdt_timeout = ltq_wdt_timeout * +	unsigned long int timeout = ltq_wdt_timeout *  			(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; -	if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) -		ltq_wdt_timeout = LTQ_MAX_TIMEOUT; +	if (timeout > LTQ_MAX_TIMEOUT) +		timeout = LTQ_MAX_TIMEOUT;  	/* write the first password magic */  	ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);  	/* write the second magic plus the configuration and new timeout */  	ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | -		LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); +		LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);  }  static void diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 3066a5127ca..eaca366b723 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {  	.notifier_call = epx_c3_notify_sys,  }; -static const char banner[] __initdata = KERN_INFO PFX +static const char banner[] __initconst = KERN_INFO PFX  	"Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";  static int __init watchdog_init(void) diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index d33520d0b4c..1199da0f98c 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -59,7 +59,7 @@ static struct watchdog_device *wdd;  static int watchdog_ping(struct watchdog_device *wddev)  { -	if (test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (test_bit(WDOG_ACTIVE, &wddev->status)) {  		if (wddev->ops->ping)  			return wddev->ops->ping(wddev);  /* ping the watchdog */  		else @@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)  {  	int err; -	if (!test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (!test_bit(WDOG_ACTIVE, &wddev->status)) {  		err = wddev->ops->start(wddev);  		if (err < 0)  			return err; -		set_bit(WDOG_ACTIVE, &wdd->status); +		set_bit(WDOG_ACTIVE, &wddev->status);  	}  	return 0;  } @@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)  {  	int err = -EBUSY; -	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { +	if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {  		pr_info("%s: nowayout prevents watchdog to be stopped!\n", -			
				wdd->info->identity); +							wddev->info->identity);  		return err;  	} -	if (test_bit(WDOG_ACTIVE, &wdd->status)) { +	if (test_bit(WDOG_ACTIVE, &wddev->status)) {  		err = wddev->ops->stop(wddev);  		if (err < 0)  			return err; -		clear_bit(WDOG_ACTIVE, &wdd->status); +		clear_bit(WDOG_ACTIVE, &wddev->status);  	}  	return 0;  } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index da70f5c32eb..7523719bf8a 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -54,7 +54,7 @@   * This lock protects updates to the following mapping and reference-count   * arrays. The lock does not need to be acquired to read the mapping tables.   */ -static DEFINE_SPINLOCK(irq_mapping_update_lock); +static DEFINE_MUTEX(irq_mapping_update_lock);  static LIST_HEAD(xen_irq_list_head); @@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,  	int irq = -1;  	struct physdev_irq irq_op; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = find_irq_by_gsi(gsi);  	if (irq != -1) { @@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,  				handle_edge_irq, name);  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,  {  	int irq, ret; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = xen_allocate_irq_dynamic();  	if (irq == -1) @@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,  	if (ret < 0)  		goto error_irq;  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  error_irq: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	xen_free_irq(irq);  	return -1;  } @@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)  	struct irq_info *info = info_for_irq(irq);  	int rc = -ENOENT; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	desc = irq_to_desc(irq);  	if (!desc) @@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)  	xen_free_irq(irq);  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return rc;  } @@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)  	struct irq_info *info; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	list_for_each_entry(info, &xen_irq_list_head, list) {  		if (info == NULL || info->type != IRQT_PIRQ) @@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)  	}  	irq = -1;  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  {  	int irq; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = evtchn_to_irq[evtchn]; @@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  	}  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)  	struct evtchn_bind_ipi bind_ipi;  	int evtchn, irq; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = per_cpu(ipi_to_irq, cpu)[ipi]; @@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)  	}   out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -878,7 +878,7 @@ int 
bind_virq_to_irq(unsigned int virq, unsigned int cpu)  	struct evtchn_bind_virq bind_virq;  	int evtchn, irq; -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	irq = per_cpu(virq_to_irq, cpu)[virq]; @@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  	}  out: -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	return irq;  } @@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)  	struct evtchn_close close;  	int evtchn = evtchn_from_irq(irq); -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	if (VALID_EVTCHN(evtchn)) {  		close.port = evtchn; @@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)  	xen_free_irq(irq); -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  }  int bind_evtchn_to_irqhandler(unsigned int evtchn, @@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)  	   will also be masked. */  	disable_irq(irq); -	spin_lock(&irq_mapping_update_lock); +	mutex_lock(&irq_mapping_update_lock);  	/* After resume the irq<->evtchn mappings are all cleared out */  	BUG_ON(evtchn_to_irq[evtchn] != -1); @@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)  	xen_irq_info_evtchn_init(irq, evtchn); -	spin_unlock(&irq_mapping_update_lock); +	mutex_unlock(&irq_mapping_update_lock);  	/* new event channels are always bound to cpu 0 */  	irq_set_affinity(irq, cpumask_of(0)); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 3c3abff731a..a381cd22f51 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1817,6 +1817,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)  		goto out;  	case SEEK_DATA:  	case SEEK_HOLE: +		if (offset >= i_size_read(inode)) { +			mutex_unlock(&inode->i_mutex); +			return -ENXIO; +		} +  		ret = find_desired_extent(inode, &offset, origin);  		if (ret) {  			mutex_unlock(&inode->i_mutex); @@ -1825,11 +1830,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)  	}  	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { -		ret = -EINVAL; +		offset = -EINVAL;  		goto out;  	}  	if (offset > inode->i_sb->s_maxbytes) { -		ret = -EINVAL; +		offset = -EINVAL;  		goto out;  	} diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4d14de6d121..b2d004ad66a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)  		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));  		kfree(dentry->d_fsdata);  		dentry->d_fsdata = NULL; -		d_clear_need_lookup(dentry); +		/* This thing is hashed, drop it for now */ +		d_drop(dentry);  	} else {  		ret = btrfs_inode_by_name(dir, dentry, &location);  	} @@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)  static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,  				   struct nameidata *nd)  { -	return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); +	struct dentry *ret; + +	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); +	if (unlikely(d_need_lookup(dentry))) { +		spin_lock(&dentry->d_lock); +		dentry->d_flags &= ~DCACHE_NEED_LOOKUP; +		spin_unlock(&dentry->d_lock); +	} +	return ret;  }  unsigned char btrfs_filetype_table[] = { @@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,  	/* special case for "." 
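/*
 * Illustration, not part of the commit: the btrfs_file_llseek() change above
 * returns -ENXIO for SEEK_DATA/SEEK_HOLE at or beyond i_size, which is the
 * behaviour lseek(2) documents.  A minimal caller-side check, as a userspace
 * sketch (the file path is arbitrary; works on filesystems that support
 * SEEK_DATA):
 */
#define _GNU_SOURCE		/* SEEK_DATA / SEEK_HOLE */
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);
	off_t off;

	if (fd < 0)
		return 1;
	off = lseek(fd, (off_t)1 << 30, SEEK_DATA);	/* far past EOF for a small file */
	if (off < 0 && errno == ENXIO)
		printf("no data at/after that offset (offset past EOF)\n");
	else
		printf("data starts at %lld\n", (long long)off);
	close(fd);
	return 0;
}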
*/  	if (filp->f_pos == 0) { -		over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); +		over = filldir(dirent, ".", 1, +			       filp->f_pos, btrfs_ino(inode), DT_DIR);  		if (over)  			return 0;  		filp->f_pos = 1; @@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,  	if (filp->f_pos == 1) {  		u64 pino = parent_ino(filp->f_path.dentry);  		over = filldir(dirent, "..", 2, -			       2, pino, DT_DIR); +			       filp->f_pos, pino, DT_DIR);  		if (over)  			return 0;  		filp->f_pos = 2; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3351b1b2457..538f65a79ec 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  	if (!(src_file->f_mode & FMODE_READ))  		goto out_fput; +	/* don't make the dst file partly checksummed */ +	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != +	    (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) +		goto out_fput; +  	ret = -EISDIR;  	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))  		goto out_fput; @@ -2226,6 +2231,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  			goto out_unlock;  	} +	/* truncate page cache pages from target inode range */ +	truncate_inode_pages_range(&inode->i_data, destoff, +				   PAGE_CACHE_ALIGN(destoff + len) - 1); +  	/* do any pending delalloc/csum calc on src, one way or  	   another, and lock file content */  	while (1) { @@ -2242,10 +2251,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  		btrfs_wait_ordered_range(src, off, len);  	} -	/* truncate page cache pages from target inode range */ -	truncate_inode_pages_range(&inode->i_data, off, -				   ALIGN(off + len, PAGE_CACHE_SIZE) - 1); -  	/* clone data */  	key.objectid = btrfs_ino(src);  	key.type = BTRFS_EXTENT_DATA_KEY; @@ -2323,7 +2328,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  			else  				new_key.offset = destoff; -			trans = btrfs_start_transaction(root, 1); +			/* +			 * 1 - adjusting old extent (we may have to split it) +			 * 1 - add new extent +			 * 1 - inode update +			 */ +			trans = btrfs_start_transaction(root, 3);  			if (IS_ERR(trans)) {  				ret = PTR_ERR(trans);  				goto out; @@ -2442,7 +2452,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,  			if (endoff > inode->i_size)  				btrfs_i_size_write(inode, endoff); -			BTRFS_I(inode)->flags = BTRFS_I(src)->flags;  			ret = btrfs_update_inode(trans, root, inode);  			BUG_ON(ret);  			btrfs_end_transaction(trans, root); diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index e76bfeb6826..30acd22147e 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -351,9 +351,7 @@ static int  build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)  {  	unsigned int dlen; -	unsigned int wlen; -	unsigned int size = 6 * sizeof(struct ntlmssp2_name); -	__le64  curtime; +	unsigned int size = 2 * sizeof(struct ntlmssp2_name);  	char *defdmname = "WORKGROUP";  	unsigned char *blobptr;  	struct ntlmssp2_name *attrptr; @@ -365,15 +363,14 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)  	}  	dlen = strlen(ses->domainName); -	wlen = strlen(ses->server->hostname); -	/* The length of this blob is a size which is -	 * six times the size of a structure which holds name/size + -	 * two times the unicode length of a domain name + -	 * two times the unicode length of a server name 
+ -	 * size of a timestamp (which is 8 bytes). +	/* +	 * The length of this blob is two times the size of a +	 * structure (av pair) which holds name/size +	 * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) + +	 * unicode length of a netbios domain name  	 */ -	ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8; +	ses->auth_key.len = size + 2 * dlen;  	ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);  	if (!ses->auth_key.response) {  		ses->auth_key.len = 0; @@ -384,44 +381,15 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)  	blobptr = ses->auth_key.response;  	attrptr = (struct ntlmssp2_name *) blobptr; +	/* +	 * As defined in MS-NTLM 3.3.2, just this av pair field +	 * is sufficient as part of the temp +	 */  	attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);  	attrptr->length = cpu_to_le16(2 * dlen);  	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);  	cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); -	blobptr += 2 * dlen; -	attrptr = (struct ntlmssp2_name *) blobptr; - -	attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME); -	attrptr->length = cpu_to_le16(2 * wlen); -	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); -	cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp); - -	blobptr += 2 * wlen; -	attrptr = (struct ntlmssp2_name *) blobptr; - -	attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME); -	attrptr->length = cpu_to_le16(2 * dlen); -	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); -	cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); - -	blobptr += 2 * dlen; -	attrptr = (struct ntlmssp2_name *) blobptr; - -	attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME); -	attrptr->length = cpu_to_le16(2 * wlen); -	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); -	cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp); - -	blobptr += 2 * wlen; -	attrptr = (struct ntlmssp2_name *) blobptr; - -	attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP); -	attrptr->length = cpu_to_le16(sizeof(__le64)); -	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); -	curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); -	memcpy(blobptr, &curtime, sizeof(__le64)); -  	return 0;  } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f93eb948d07..54b8f1e7da9 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -548,6 +548,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)  		struct inode *dir = dentry->d_inode;  		struct dentry *child; +		if (!dir) { +			dput(dentry); +			dentry = ERR_PTR(-ENOENT); +			break; +		} +  		/* skip separators */  		while (*s == sep)  			s++; @@ -563,10 +569,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)  		mutex_unlock(&dir->i_mutex);  		dput(dentry);  		dentry = child; -		if (!dentry->d_inode) { -			dput(dentry); -			dentry = ERR_PTR(-ENOENT); -		}  	} while (!IS_ERR(dentry));  	_FreeXid(xid);  	kfree(full_path); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index aac37d99a48..a80f7bd97b9 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,  	T2_FNEXT_RSP_PARMS *parms;  	char *response_data;  	int rc = 0; -	int bytes_returned, name_len; +	int bytes_returned; +	unsigned int name_len;  	__u16 params, byte_count;  	cFYI(1, "In FindNext"); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 633c246b677..f4af4cc3750 100644 --- 
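/*
 * Illustration, not part of the commit: the trimmed blob above is two
 * NTLMSSP AV pairs -- the NetBIOS domain name (payload written as UTF-16LE
 * via cifs_strtoUCS) followed by the end-of-list pair, which is simply the
 * zeroed tail of the kzalloc'd buffer.  A standalone sketch of one such
 * type/length/value pair, assuming the 4-byte little-endian header that the
 * "2 * sizeof(struct ntlmssp2_name)" accounting above implies:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

size_t put_av_pair(uint8_t *out, uint16_t type, const void *val, uint16_t len)
{
	out[0] = type & 0xff;  out[1] = type >> 8;	/* little-endian AvId   */
	out[2] = len  & 0xff;  out[3] = len  >> 8;	/* little-endian AvLen  */
	if (len)
		memcpy(out + 4, val, len);
	return 4 + len;
}

/* blob length: two headers plus the UTF-16 domain name, i.e. size + 2 * dlen */
size_t av_blob_len(size_t domain_chars)
{
	return 2 * 4 + 2 * domain_chars;
}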
a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1298,7 +1298,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,  			/* ignore */  		} else if (strnicmp(data, "guest", 5) == 0) {  			/* ignore */ -		} else if (strnicmp(data, "rw", 2) == 0) { +		} else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {  			/* ignore */  		} else if (strnicmp(data, "ro", 2) == 0) {  			/* ignore */ @@ -1401,7 +1401,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,  			vol->server_ino = 1;  		} else if (strnicmp(data, "noserverino", 9) == 0) {  			vol->server_ino = 0; -		} else if (strnicmp(data, "rwpidforward", 4) == 0) { +		} else if (strnicmp(data, "rwpidforward", 12) == 0) {  			vol->rwpidforward = 1;  		} else if (strnicmp(data, "cifsacl", 7) == 0) {  			vol->cifs_acl = 1; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 04da6acde85..12661e1deed 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,  		return bh;  	if (buffer_uptodate(bh))  		return bh; -	ll_rw_block(READ_META, 1, &bh); +	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);  	wait_on_buffer(bh);  	if (buffer_uptodate(bh))  		return bh; @@ -2807,7 +2807,7 @@ make_io:  		trace_ext3_load_inode(inode);  		get_bh(bh);  		bh->b_end_io = end_buffer_read_sync; -		submit_bh(READ_META, bh); +		submit_bh(READ | REQ_META | REQ_PRIO, bh);  		wait_on_buffer(bh);  		if (!buffer_uptodate(bh)) {  			ext3_error(inode->i_sb, "ext3_get_inode_loc", diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 5571708b6a5..0629e09f651 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -922,7 +922,8 @@ restart:  				bh = ext3_getblk(NULL, dir, b++, 0, &err);  				bh_use[ra_max] = bh;  				if (bh) -					ll_rw_block(READ_META, 1, &bh); +					ll_rw_block(READ | REQ_META | REQ_PRIO, +						    1, &bh);  			}  		}  		if ((bh = bh_use[ra_ptr++]) == NULL) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 18d2558b762..986e2388f03 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -647,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,  		return bh;  	if (buffer_uptodate(bh))  		return bh; -	ll_rw_block(READ_META, 1, &bh); +	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);  	wait_on_buffer(bh);  	if (buffer_uptodate(bh))  		return bh; @@ -3298,7 +3298,7 @@ make_io:  		trace_ext4_load_inode(inode);  		get_bh(bh);  		bh->b_end_io = end_buffer_read_sync; -		submit_bh(READ_META, bh); +		submit_bh(READ | REQ_META | REQ_PRIO, bh);  		wait_on_buffer(bh);  		if (!buffer_uptodate(bh)) {  			EXT4_ERROR_INODE_BLOCK(inode, block, diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index f8068c7bae9..1c924faeb6c 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -922,7 +922,8 @@ restart:  				bh = ext4_getblk(NULL, dir, b++, 0, &err);  				bh_use[ra_max] = bh;  				if (bh) -					ll_rw_block(READ_META, 1, &bh); +					ll_rw_block(READ | REQ_META | REQ_PRIO, +						    1, &bh);  			}  		}  		if ((bh = bh_use[ra_ptr++]) == NULL) diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 85c62923ee2..59864643436 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)  	bh->b_end_io = end_buffer_write_sync;  	get_bh(bh);  	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) -		submit_bh(WRITE_SYNC | REQ_META, bh); +		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);  	else -		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); +		submit_bh(WRITE_FLUSH_FUA | REQ_META 
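/*
 * Illustration, not part of the commit: the two cifs option fixes above are
 * the classic prefix-match pitfall -- comparing only the first 2 characters
 * lets "rw" swallow "rwpidforward", and comparing only 4 characters of
 * "rwpidforward" accepts misspellings.  An exact, case-insensitive token
 * match avoids both:
 */
#include <string.h>
#include <strings.h>

int opt_is(const char *data, const char *token)
{
	size_t n = strlen(token);

	return strncasecmp(data, token, n) == 0 && data[n] == '\0';
}
/* opt_is("rwpidforward", "rw") == 0, opt_is("RW", "rw") == 1 */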
| REQ_PRIO, bh);  	wait_on_buffer(bh);  	if (!buffer_uptodate(bh)) diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 747238cd9f9..be29858900f 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb  {  	struct buffer_head *bh, *head;  	int nr_underway = 0; -	int write_op = REQ_META | +	int write_op = REQ_META | REQ_PRIO |  		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);  	BUG_ON(!PageLocked(page)); @@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,  	}  	bh->b_end_io = end_buffer_read_sync;  	get_bh(bh); -	submit_bh(READ_SYNC | REQ_META, bh); +	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);  	if (!(flags & DIO_WAIT))  		return 0; @@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)  	if (buffer_uptodate(first_bh))  		goto out;  	if (!buffer_locked(first_bh)) -		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); +		ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);  	dblock++;  	extlen--; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3bc073a4cf8..079587e5384 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)  	bio->bi_end_io = end_bio_io_page;  	bio->bi_private = page; -	submit_bio(READ_SYNC | REQ_META, bio); +	submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);  	wait_on_page_locked(page);  	bio_put(bio);  	if (!PageUptodate(page)) { diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 42e8d23bc04..0e8bb13381e 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -709,7 +709,7 @@ get_a_page:  		set_buffer_uptodate(bh);  	if (!buffer_uptodate(bh)) { -		ll_rw_block(READ_META, 1, &bh); +		ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);  		wait_on_buffer(bh);  		if (!buffer_uptodate(bh))  			goto unlock_out; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index c106ca22e81..d24a9b666a2 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)  	struct inode *root, *inode;  	struct qstr str;  	struct nls_table *nls = NULL; +	u64 last_fs_block, last_fs_page;  	int err;  	err = -EINVAL; @@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)  	if (!sbi->rsrc_clump_blocks)  		sbi->rsrc_clump_blocks = 1; -	err = generic_check_addressable(sbi->alloc_blksz_shift, -					sbi->total_blocks); -	if (err) { +	err = -EFBIG; +	last_fs_block = sbi->total_blocks - 1; +	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> +			PAGE_CACHE_SHIFT; + +	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || +	    (last_fs_page > (pgoff_t)(~0ULL))) {  		printk(KERN_ERR "hfs: filesystem size too large.\n");  		goto out_free_vhdr;  	} @@ -525,8 +530,8 @@ out_close_cat_tree:  out_close_ext_tree:  	hfs_btree_close(sbi->ext_tree);  out_free_vhdr: -	kfree(sbi->s_vhdr); -	kfree(sbi->s_backup_vhdr); +	kfree(sbi->s_vhdr_buf); +	kfree(sbi->s_backup_vhdr_buf);  out_unload_nls:  	unload_nls(sbi->nls);  	unload_nls(nls); diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 10e515a0d45..7daf4b852d1 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -272,9 +272,9 @@ reread:  	return 0;  out_free_backup_vhdr: -	kfree(sbi->s_backup_vhdr); +	kfree(sbi->s_backup_vhdr_buf);  out_free_vhdr: -	kfree(sbi->s_vhdr); +	
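/*
 * Illustration, not part of the commit: the hfsplus hunk above replaces the
 * generic_check_addressable() call with an explicit test that the last
 * allocation block is representable both as a 512-byte sector index
 * (sector_t) and as a page-cache index (pgoff_t).  The same test standalone,
 * with the two index limits passed in and blksz_shift >= 9 assumed:
 */
#include <stdint.h>

int fs_is_addressable(uint64_t total_blocks, unsigned int blksz_shift,
		      unsigned int page_shift, uint64_t max_sector, uint64_t max_pgoff)
{
	uint64_t last_block = total_blocks - 1;

	/* shift the limit down rather than the block up, so the compare cannot overflow */
	if (last_block > (max_sector >> (blksz_shift - 9)))
		return 0;
	if (((last_block << blksz_shift) >> page_shift) > max_pgoff)
		return 0;
	return 1;
}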
kfree(sbi->s_vhdr_buf);  out:  	return error;  } diff --git a/fs/namei.c b/fs/namei.c index b52bc685465..f4788365ea2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2616,6 +2616,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)  	if (!dir->i_op->rmdir)  		return -EPERM; +	dget(dentry);  	mutex_lock(&dentry->d_inode->i_mutex);  	error = -EBUSY; @@ -2636,6 +2637,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)  out:  	mutex_unlock(&dentry->d_inode->i_mutex); +	dput(dentry);  	if (!error)  		d_delete(dentry);  	return error; @@ -3025,6 +3027,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,  	if (error)  		return error; +	dget(new_dentry);  	if (target)  		mutex_lock(&target->i_mutex); @@ -3045,6 +3048,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,  out:  	if (target)  		mutex_unlock(&target->i_mutex); +	dput(new_dentry);  	if (!error)  		if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))  			d_move(old_dentry,new_dentry); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 1ec1a85fa71..3e93e9a1bee 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -56,6 +56,9 @@ enum nfs4_session_state {  	NFS4_SESSION_DRAINING,  }; +#define NFS4_RENEW_TIMEOUT		0x01 +#define NFS4_RENEW_DELEGATION_CB	0x02 +  struct nfs4_minor_version_ops {  	u32	minor_version; @@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops {  };  struct nfs4_state_maintenance_ops { -	int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); +	int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);  	struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);  	int (*renew_lease)(struct nfs_client *, struct rpc_cred *);  }; @@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations;  extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);  extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);  extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); -extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); -extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);  extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);  extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);  extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); @@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);  extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);  extern void nfs4_schedule_lease_recovery(struct nfs_client *);  extern void nfs4_schedule_state_manager(struct nfs_client *); +extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);  extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);  extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);  extern void nfs41_handle_recall_slot(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8c77039e7a8..4700fae1ada 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)  	if (task->tk_status < 0) {  		/* Unless we're shutting down, schedule state recovery! 
*/ -		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) +		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) +			return; +		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {  			nfs4_schedule_lease_recovery(clp); -		return; +			return; +		} +		nfs4_schedule_path_down_recovery(clp);  	}  	do_renew_lease(clp, timestamp);  } @@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {  	.rpc_release = nfs4_renew_release,  }; -int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)  {  	struct rpc_message msg = {  		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)  	};  	struct nfs4_renewdata *data; +	if (renew_flags == 0) +		return 0;  	if (!atomic_inc_not_zero(&clp->cl_count))  		return -EIO; -	data = kmalloc(sizeof(*data), GFP_KERNEL); +	data = kmalloc(sizeof(*data), GFP_NOFS);  	if (data == NULL)  		return -ENOMEM;  	data->client = clp; @@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)  			&nfs4_renew_ops, data);  } -int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)  {  	struct rpc_message msg = {  		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_  	return rpc_run_task(&task_setup_data);  } -static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)  {  	struct rpc_task *task;  	int ret = 0; +	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) +		return 0;  	task = _nfs41_proc_sequence(clp, cred);  	if (IS_ERR(task))  		ret = PTR_ERR(task); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index df8e7f3ca56..dc484c0eae7 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work)  	struct rpc_cred *cred;  	long lease;  	unsigned long last, now; +	unsigned renew_flags = 0;  	ops = clp->cl_mvops->state_renewal_ops;  	dprintk("%s: start\n", __func__); @@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work)  	last = clp->cl_last_renewal;  	now = jiffies;  	/* Are we close to a lease timeout? */ -	if (time_after(now, last + lease/3)) { +	if (time_after(now, last + lease/3)) +		renew_flags |= NFS4_RENEW_TIMEOUT; +	if (nfs_delegations_present(clp)) +		renew_flags |= NFS4_RENEW_DELEGATION_CB; + +	if (renew_flags != 0) {  		cred = ops->get_state_renewal_cred_locked(clp);  		spin_unlock(&clp->cl_lock);  		if (cred == NULL) { -			if (!nfs_delegations_present(clp)) { +			if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {  				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);  				goto out;  			}  			nfs_expire_all_delegations(clp);  		} else {  			/* Queue an asynchronous RENEW. 
*/ -			ops->sched_state_renewal(clp, cred); +			ops->sched_state_renewal(clp, cred, renew_flags);  			put_rpccred(cred);  			goto out_exp;  		} diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 72ab97ef3d6..39914be40b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)  	nfs4_schedule_state_manager(clp);  } +void nfs4_schedule_path_down_recovery(struct nfs_client *clp) +{ +	nfs_handle_cb_pathdown(clp); +	nfs4_schedule_state_manager(clp); +} +  static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)  { diff --git a/fs/nfs/super.c b/fs/nfs/super.c index b961ceac66b..9b7dd7013b1 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb)  		sb->s_blocksize = nfs_block_bits(server->wsize,  						 &sb->s_blocksize_bits); -	if (server->flags & NFS_MOUNT_NOAC) -		sb->s_flags |= MS_SYNCHRONOUS; -  	sb->s_bdi = &server->backing_dev_info;  	nfs_super_set_maxbytes(sb, server->maxfilesize); @@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,  	if (server->flags & NFS_MOUNT_UNSHARED)  		compare_super = NULL; +	/* -o noac implies -o sync */ +	if (server->flags & NFS_MOUNT_NOAC) +		sb_mntdata.mntflags |= MS_SYNCHRONOUS; +  	/* Get a superblock - note that we may end up sharing one that already exists */  	s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);  	if (IS_ERR(s)) { @@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,  	if (server->flags & NFS_MOUNT_UNSHARED)  		compare_super = NULL; +	/* -o noac implies -o sync */ +	if (server->flags & NFS_MOUNT_NOAC) +		sb_mntdata.mntflags |= MS_SYNCHRONOUS; +  	/* Get a superblock - note that we may end up sharing one that already exists */  	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);  	if (IS_ERR(s)) { @@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,  	if (server->flags & NFS4_MOUNT_UNSHARED)  		compare_super = NULL; +	/* -o noac implies -o sync */ +	if (server->flags & NFS_MOUNT_NOAC) +		sb_mntdata.mntflags |= MS_SYNCHRONOUS; +  	/* Get a superblock - note that we may end up sharing one that already exists */  	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);  	if (IS_ERR(s)) { @@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,  	if (server->flags & NFS4_MOUNT_UNSHARED)  		compare_super = NULL; +	/* -o noac implies -o sync */ +	if (server->flags & NFS_MOUNT_NOAC) +		sb_mntdata.mntflags |= MS_SYNCHRONOUS; +  	/* Get a superblock - note that we may end up sharing one that already exists */  	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);  	if (IS_ERR(s)) { @@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,  	if (server->flags & NFS4_MOUNT_UNSHARED)  		compare_super = NULL; +	/* -o noac implies -o sync */ +	if (server->flags & NFS_MOUNT_NOAC) +		sb_mntdata.mntflags |= MS_SYNCHRONOUS; +  	/* Get a superblock - note that we may end up sharing one that already exists */  	s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);  	if (IS_ERR(s)) { diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b39b37f8091..c9bd2a6b7d4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head  		if (!data)  			goto out_bad;  
		data->pagevec[0] = page; -		nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); +		nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);  		list_add(&data->list, res);  		requests++;  		nbytes -= len; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 25b6a887adb..5afaa58a863 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -877,30 +877,54 @@ struct numa_maps_private {  	struct numa_maps md;  }; -static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty) +static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, +			unsigned long nr_pages)  {  	int count = page_mapcount(page); -	md->pages++; +	md->pages += nr_pages;  	if (pte_dirty || PageDirty(page)) -		md->dirty++; +		md->dirty += nr_pages;  	if (PageSwapCache(page)) -		md->swapcache++; +		md->swapcache += nr_pages;  	if (PageActive(page) || PageUnevictable(page)) -		md->active++; +		md->active += nr_pages;  	if (PageWriteback(page)) -		md->writeback++; +		md->writeback += nr_pages;  	if (PageAnon(page)) -		md->anon++; +		md->anon += nr_pages;  	if (count > md->mapcount_max)  		md->mapcount_max = count; -	md->node[page_to_nid(page)]++; +	md->node[page_to_nid(page)] += nr_pages; +} + +static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, +		unsigned long addr) +{ +	struct page *page; +	int nid; + +	if (!pte_present(pte)) +		return NULL; + +	page = vm_normal_page(vma, addr, pte); +	if (!page) +		return NULL; + +	if (PageReserved(page)) +		return NULL; + +	nid = page_to_nid(page); +	if (!node_isset(nid, node_states[N_HIGH_MEMORY])) +		return NULL; + +	return page;  }  static int gather_pte_stats(pmd_t *pmd, unsigned long addr, @@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,  	pte_t *pte;  	md = walk->private; -	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); -	do { -		struct page *page; -		int nid; +	spin_lock(&walk->mm->page_table_lock); +	if (pmd_trans_huge(*pmd)) { +		if (pmd_trans_splitting(*pmd)) { +			spin_unlock(&walk->mm->page_table_lock); +			wait_split_huge_page(md->vma->anon_vma, pmd); +		} else { +			pte_t huge_pte = *(pte_t *)pmd; +			struct page *page; -		if (!pte_present(*pte)) -			continue; +			page = can_gather_numa_stats(huge_pte, md->vma, addr); +			if (page) +				gather_stats(page, md, pte_dirty(huge_pte), +						HPAGE_PMD_SIZE/PAGE_SIZE); +			spin_unlock(&walk->mm->page_table_lock); +			return 0; +		} +	} else { +		spin_unlock(&walk->mm->page_table_lock); +	} -		page = vm_normal_page(md->vma, addr, *pte); +	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); +	do { +		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);  		if (!page)  			continue; - -		if (PageReserved(page)) -			continue; - -		nid = page_to_nid(page); -		if (!node_isset(nid, node_states[N_HIGH_MEMORY])) -			continue; - -		gather_stats(page, md, pte_dirty(*pte)); +		gather_stats(page, md, pte_dirty(*pte), 1);  	} while (pte++, addr += PAGE_SIZE, addr != end);  	pte_unmap_unlock(orig_pte, ptl); @@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,  		return 0;  	md = walk->private; -	gather_stats(page, md, pte_dirty(*pte)); +	gather_stats(page, md, pte_dirty(*pte), 1);  	return 0;  } diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 63e971e2b83..8c37dde4c52 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -1300,6 +1300,7 @@ xfs_end_io_direct_write(  	bool			is_async)  {  	struct xfs_ioend	*ioend = iocb->private; +	struct 
inode		*inode = ioend->io_inode;  	/*  	 * blockdev_direct_IO can return an error even after the I/O @@ -1331,7 +1332,7 @@ xfs_end_io_direct_write(  	}  	/* XXX: probably should move into the real I/O completion handler */ -	inode_dio_done(ioend->io_inode); +	inode_dio_done(inode);  }  STATIC ssize_t diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h index 98999cf107c..feb91219674 100644 --- a/include/linux/basic_mmio_gpio.h +++ b/include/linux/basic_mmio_gpio.h @@ -63,15 +63,10 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)  	return container_of(gc, struct bgpio_chip, gc);  } -int __devexit bgpio_remove(struct bgpio_chip *bgc); -int __devinit bgpio_init(struct bgpio_chip *bgc, -			 struct device *dev, -			 unsigned long sz, -			 void __iomem *dat, -			 void __iomem *set, -			 void __iomem *clr, -			 void __iomem *dirout, -			 void __iomem *dirin, -			 bool big_endian); +int bgpio_remove(struct bgpio_chip *bgc); +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, +	       unsigned long sz, void __iomem *dat, void __iomem *set, +	       void __iomem *clr, void __iomem *dirout, void __iomem *dirin, +	       bool big_endian);  #endif /* __BASIC_MMIO_GPIO_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 32f0076e844..71fc53bb8f1 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -124,6 +124,7 @@ enum rq_flag_bits {  	__REQ_SYNC,		/* request is sync (sync write or read) */  	__REQ_META,		/* metadata io request */ +	__REQ_PRIO,		/* boost priority in cfq */  	__REQ_DISCARD,		/* request to discard sectors */  	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */ @@ -161,14 +162,15 @@ enum rq_flag_bits {  #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)  #define REQ_SYNC		(1 << __REQ_SYNC)  #define REQ_META		(1 << __REQ_META) +#define REQ_PRIO		(1 << __REQ_PRIO)  #define REQ_DISCARD		(1 << __REQ_DISCARD)  #define REQ_NOIDLE		(1 << __REQ_NOIDLE)  #define REQ_FAILFAST_MASK \  	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)  #define REQ_COMMON_MASK \ -	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ -	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) +	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ +	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)  #define REQ_CLONE_MASK		REQ_COMMON_MASK  #define REQ_RAHEAD		(1 << __REQ_RAHEAD) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 84b15d54f8c..7fbaa910334 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -873,7 +873,6 @@ struct blk_plug {  	struct list_head list;  	struct list_head cb_list;  	unsigned int should_sort; -	unsigned int count;  };  #define BLK_MAX_REQUEST_COUNT 16 diff --git a/include/linux/fs.h b/include/linux/fs.h index c2bd68f2277..277f497923a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -162,10 +162,8 @@ struct inodes_stat_t {  #define READA			RWA_MASK  #define READ_SYNC		(READ | REQ_SYNC) -#define READ_META		(READ | REQ_META)  #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)  #define WRITE_ODIRECT		(WRITE | REQ_SYNC) -#define WRITE_META		(WRITE | REQ_META)  #define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)  #define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)  #define WRITE_FLUSH_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3b535db00a9..343bd7661f2 100644 --- 
a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -39,16 +39,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,  					struct mem_cgroup *mem_cont,  					int active, int file); -struct memcg_scanrecord { -	struct mem_cgroup *mem; /* scanend memory cgroup */ -	struct mem_cgroup *root; /* scan target hierarchy root */ -	int context;		/* scanning context (see memcontrol.c) */ -	unsigned long nr_scanned[2]; /* the number of scanned pages */ -	unsigned long nr_rotated[2]; /* the number of rotated pages */ -	unsigned long nr_freed[2]; /* the number of freed pages */ -	unsigned long elapsed; /* nsec of time elapsed while scanning */ -}; -  #ifdef CONFIG_CGROUP_MEM_RES_CTLR  /*   * All "charge" functions with gfp_mask should use GFP_KERNEL or @@ -127,15 +117,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page);  extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,  					struct task_struct *p); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, -						  gfp_t gfp_mask, bool noswap, -						  struct memcg_scanrecord *rec); -extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, -						gfp_t gfp_mask, bool noswap, -						struct zone *zone, -						struct memcg_scanrecord *rec, -						unsigned long *nr_scanned); -  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP  extern int do_swap_account;  #endif diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index d12f8d635a8..97cf4f27d64 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {  	struct regulator_init_data *init_data;  }; -#define WM8994_CONFIGURE_GPIO 0x8000 +#define WM8994_CONFIGURE_GPIO 0x10000  #define WM8994_DRC_REGS 5  #define WM8994_EQ_REGS  20 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7b996ed86d5..8bd383caa36 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,  extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);  extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); +extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);  extern struct sk_buff *skb_clone(struct sk_buff *skb,  				 gfp_t priority);  extern struct sk_buff *skb_copy(const struct sk_buff *skb, diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 12b2b18e50c..e16557a357e 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -231,6 +231,8 @@ enum  	LINUX_MIB_TCPDEFERACCEPTDROP,  	LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */  	LINUX_MIB_TCPTIMEWAITOVERFLOW,		/* TCPTimeWaitOverflow */ +	LINUX_MIB_TCPREQQFULLDOCOOKIES,		/* TCPReqQFullDoCookies */ +	LINUX_MIB_TCPREQQFULLDROP,		/* TCPReqQFullDrop */  	__LINUX_MIB_MAX  }; diff --git a/include/linux/swap.h b/include/linux/swap.h index 14d62490922..c71f84bb62e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -252,6 +252,12 @@ static inline void lru_cache_add_file(struct page *page)  extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,  					gfp_t gfp_mask, nodemask_t *mask);  extern int __isolate_lru_page(struct page *page, int mode, int file); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, +						  gfp_t gfp_mask, bool noswap); +extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, +						gfp_t gfp_mask, bool noswap, +						struct zone *zone, +						
unsigned long *nr_scanned);  extern unsigned long shrink_all_memory(unsigned long nr_pages);  extern int vm_swappiness;  extern int remove_mapping(struct address_space *mapping, struct page *page); diff --git a/include/net/flow.h b/include/net/flow.h index 78113daadd6..a09447749e2 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -7,6 +7,7 @@  #ifndef _NET_FLOW_H  #define _NET_FLOW_H +#include <linux/socket.h>  #include <linux/in6.h>  #include <linux/atomic.h> @@ -68,7 +69,7 @@ struct flowi4 {  #define fl4_ipsec_spi		uli.spi  #define fl4_mh_type		uli.mht.type  #define fl4_gre_key		uli.gre_key -}; +} __attribute__((__aligned__(BITS_PER_LONG/8)));  static inline void flowi4_init_output(struct flowi4 *fl4, int oif,  				      __u32 mark, __u8 tos, __u8 scope, @@ -112,7 +113,7 @@ struct flowi6 {  #define fl6_ipsec_spi		uli.spi  #define fl6_mh_type		uli.mht.type  #define fl6_gre_key		uli.gre_key -}; +} __attribute__((__aligned__(BITS_PER_LONG/8)));  struct flowidn {  	struct flowi_common	__fl_common; @@ -127,7 +128,7 @@ struct flowidn {  	union flowi_uli		uli;  #define fld_sport		uli.ports.sport  #define fld_dport		uli.ports.dport -}; +} __attribute__((__aligned__(BITS_PER_LONG/8)));  struct flowi {  	union { @@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)  	return container_of(fldn, struct flowi, u.dn);  } +typedef unsigned long flow_compare_t; + +static inline size_t flow_key_size(u16 family) +{ +	switch (family) { +	case AF_INET: +		BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); +		return sizeof(struct flowi4) / sizeof(flow_compare_t); +	case AF_INET6: +		BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); +		return sizeof(struct flowi6) / sizeof(flow_compare_t); +	case AF_DECnet: +		BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); +		return sizeof(struct flowidn) / sizeof(flow_compare_t); +	} +	return 0; +} +  #define FLOW_DIR_IN	0  #define FLOW_DIR_OUT	1  #define FLOW_DIR_FWD	2 diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 99e6e19b57c..4c0766e201e 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;   */  struct listen_sock {  	u8			max_qlen_log; -	/* 3 bytes hole, try to use */ +	u8			synflood_warned; +	/* 2 bytes hole, try to use */  	int			qlen;  	int			qlen_young;  	int			clock_hand; diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 6506458ccd3..712b3bebeda 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -109,6 +109,7 @@ typedef enum {  	SCTP_CMD_SEND_MSG,	 /* Send the whole use message */  	SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */  	SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ +	SCTP_CMD_SET_ASOC,	 /* Restore association context */  	SCTP_CMD_LAST  } sctp_verb_t; diff --git a/include/net/tcp.h b/include/net/tcp.h index 149a415d1e0..acc620a4a45 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);  extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];  extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,   				    struct ip_options *opt); +#ifdef CONFIG_SYN_COOKIES  extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,   				     __u16 *mss); +#else +static inline __u32 cookie_v4_init_sequence(struct sock *sk, +					    struct sk_buff *skb, +					    __u16 *mss) +{ +	return 0; +} +#endif  
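The tcp.h hunk above guards cookie_v4_init_sequence() behind CONFIG_SYN_COOKIES and supplies a static inline stub that returns 0 when the option is off, so callers need no #ifdef of their own; the same shape is applied to cookie_v6_init_sequence() just below. A minimal userspace illustration of that pattern, built without -DFEATURE_FOO (FEATURE_FOO and do_foo() are made-up names, not from the patch):

/* Compile-time stub pattern: real symbol when the feature is on, inline no-op when off. */
#include <stdio.h>

#ifdef FEATURE_FOO
extern int do_foo(int arg);		/* real implementation lives elsewhere */
#else
static inline int do_foo(int arg)	/* stub: feature compiled out */
{
	(void)arg;
	return 0;
}
#endif

int main(void)
{
	/* Callers stay free of #ifdefs; the stub simply reports "no result". */
	printf("do_foo(42) = %d\n", do_foo(42));
	return 0;
}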
extern __u32 cookie_init_timestamp(struct request_sock *req);  extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);  /* From net/ipv6/syncookies.c */  extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); +#ifdef CONFIG_SYN_COOKIES  extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,  				     __u16 *mss); - +#else +static inline __u32 cookie_v6_init_sequence(struct sock *sk, +					    struct sk_buff *skb, +					    __u16 *mss) +{ +	return 0; +} +#endif  /* tcp_output.c */  extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, @@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);  extern void tcp_send_fin(struct sock *sk);  extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);  extern int tcp_send_synack(struct sock *); +extern int tcp_syn_flood_action(struct sock *sk, +				const struct sk_buff *skb, +				const char *proto);  extern void tcp_push_one(struct sock *, unsigned int mss_now);  extern void tcp_send_ack(struct sock *sk);  extern void tcp_send_delayed_ack(struct sock *sk); diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 5271a741c3a..498433dd067 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -39,6 +39,7 @@ extern int			datagram_recv_ctl(struct sock *sk,  						  struct sk_buff *skb);  extern int			datagram_send_ctl(struct net *net, +						  struct sock *sk,  						  struct msghdr *msg,  						  struct flowi6 *fl6,  						  struct ipv6_txoptions *opt, diff --git a/init/main.c b/init/main.c index 9c51ee7adf3..2a9b88aa5e7 100644 --- a/init/main.c +++ b/init/main.c @@ -209,8 +209,19 @@ early_param("quiet", quiet_kernel);  static int __init loglevel(char *str)  { -	get_option(&str, &console_loglevel); -	return 0; +	int newlevel; + +	/* +	 * Only update loglevel value when a correct setting was passed, +	 * to prevent blind crashes (when loglevel being set to 0) that +	 * are quite hard to debug +	 */ +	if (get_option(&str, &newlevel)) { +		console_loglevel = newlevel; +		return 0; +	} + +	return -EINVAL;  }  early_param("loglevel", loglevel); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index d5a3009da71..dc5114b4c16 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)  	desc->depth = 1;  	if (desc->irq_data.chip->irq_shutdown)  		desc->irq_data.chip->irq_shutdown(&desc->irq_data); -	if (desc->irq_data.chip->irq_disable) +	else if (desc->irq_data.chip->irq_disable)  		desc->irq_data.chip->irq_disable(&desc->irq_data);  	else  		desc->irq_data.chip->irq_mask(&desc->irq_data); diff --git a/kernel/taskstats.c b/kernel/taskstats.c index e19ce1454ee..e66046456f4 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {  	.cmd		= TASKSTATS_CMD_GET,  	.doit		= taskstats_user_cmd,  	.policy		= taskstats_cmd_get_policy, +	.flags		= GENL_ADMIN_PERM,  };  static struct genl_ops cgroupstats_ops = { diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 24dc60d9fa1..5bbfac85866 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)  #define KB 1024  #define MB (1024*KB) +#define KB_MASK (~(KB-1))  /*   * fill in extended accounting fields   */ @@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)  		stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;  		mmput(mm);  	} -	stats->read_char	= 
p->ioac.rchar; -	stats->write_char	= p->ioac.wchar; -	stats->read_syscalls	= p->ioac.syscr; -	stats->write_syscalls	= p->ioac.syscw; +	stats->read_char	= p->ioac.rchar & KB_MASK; +	stats->write_char	= p->ioac.wchar & KB_MASK; +	stats->read_syscalls	= p->ioac.syscr & KB_MASK; +	stats->write_syscalls	= p->ioac.syscw & KB_MASK;  #ifdef CONFIG_TASK_IO_ACCOUNTING -	stats->read_bytes	= p->ioac.read_bytes; -	stats->write_bytes	= p->ioac.write_bytes; -	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes; +	stats->read_bytes	= p->ioac.read_bytes & KB_MASK; +	stats->write_bytes	= p->ioac.write_bytes & KB_MASK; +	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;  #else  	stats->read_bytes	= 0;  	stats->write_bytes	= 0; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 25fb1b0e53f..1783aabc612 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2412,8 +2412,13 @@ reflush:  	for_each_cwq_cpu(cpu, wq) {  		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); +		bool drained; -		if (!cwq->nr_active && list_empty(&cwq->delayed_works)) +		spin_lock_irq(&cwq->gcwq->lock); +		drained = !cwq->nr_active && list_empty(&cwq->delayed_works); +		spin_unlock_irq(&cwq->gcwq->lock); + +		if (drained)  			continue;  		if (++flush_cnt == 10 || diff --git a/lib/sha1.c b/lib/sha1.c index f33271dd00c..1de509a159c 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -8,6 +8,7 @@  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/bitops.h> +#include <linux/cryptohash.h>  #include <asm/unaligned.h>  /* diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c index e51e2558ca9..a768e6d28bb 100644 --- a/lib/xz/xz_dec_bcj.c +++ b/lib/xz/xz_dec_bcj.c @@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,  	 * next filter in the chain. Apply the BCJ filter on the new data  	 * in the output buffer. If everything cannot be filtered, copy it  	 * to temp and rewind the output buffer position accordingly. +	 * +	 * This needs to be always run when temp.size == 0 to handle a special +	 * case where the output buffer is full and the next filter has no +	 * more output coming but hasn't returned XZ_STREAM_END yet.  	 */ -	if (s->temp.size < b->out_size - b->out_pos) { +	if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {  		out_start = b->out_pos;  		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);  		b->out_pos += s->temp.size; @@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,  		s->temp.size = b->out_pos - out_start;  		b->out_pos -= s->temp.size;  		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + +		/* +		 * If there wasn't enough input to the next filter to fill +		 * the output buffer with unfiltered data, there's no point +		 * to try decoding more data to temp. +		 */ +		if (b->out_pos + s->temp.size < b->out_size) +			return XZ_OK;  	}  	/* -	 * If we have unfiltered data in temp, try to fill by decoding more -	 * data from the next filter. Apply the BCJ filter on temp. Then we -	 * hopefully can fill the actual output buffer by copying filtered -	 * data from temp. A mix of filtered and unfiltered data may be left -	 * in temp; it will be taken care on the next call to this function. +	 * We have unfiltered data in temp. If the output buffer isn't full +	 * yet, try to fill the temp buffer by decoding more data from the +	 * next filter. Apply the BCJ filter on temp. Then we hopefully can +	 * fill the actual output buffer by copying filtered data from temp. 
+	 * A mix of filtered and unfiltered data may be left in temp; it will +	 * be taken care on the next call to this function.  	 */ -	if (s->temp.size > 0) { +	if (b->out_pos < b->out_size) {  		/* Make b->out{,_pos,_size} temporarily point to s->temp. */  		s->out = b->out;  		s->out_pos = b->out_pos; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index d6edf8d14f9..a87da524a4a 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)  	return max(5UL * 60 * HZ, interval);  } +/* + * Clear pending bit and wakeup anybody waiting for flusher thread creation or + * shutdown + */ +static void bdi_clear_pending(struct backing_dev_info *bdi) +{ +	clear_bit(BDI_pending, &bdi->state); +	smp_mb__after_clear_bit(); +	wake_up_bit(&bdi->state, BDI_pending); +} +  static int bdi_forker_thread(void *ptr)  {  	struct bdi_writeback *me = ptr; @@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)  		}  		spin_lock_bh(&bdi_lock); +		/* +		 * In the following loop we are going to check whether we have +		 * some work to do without any synchronization with tasks +		 * waking us up to do work for them. So we have to set task +		 * state already here so that we don't miss wakeups coming +		 * after we verify some condition. +		 */  		set_current_state(TASK_INTERRUPTIBLE);  		list_for_each_entry(bdi, &bdi_list, bdi_list) { @@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)  				spin_unlock_bh(&bdi->wb_lock);  				wake_up_process(task);  			} +			bdi_clear_pending(bdi);  			break;  		case KILL_THREAD:  			__set_current_state(TASK_RUNNING);  			kthread_stop(task); +			bdi_clear_pending(bdi);  			break;  		case NO_ACTION: @@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)  			else  				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));  			try_to_freeze(); -			/* Back to the main loop */ -			continue; +			break;  		} - -		/* -		 * Clear pending bit and wakeup anybody waiting to tear us down. -		 */ -		clear_bit(BDI_pending, &bdi->state); -		smp_mb__after_clear_bit(); -		wake_up_bit(&bdi->state, BDI_pending);  	}  	return 0; diff --git a/mm/filemap.c b/mm/filemap.c index 645a080ba4d..7771871fa35 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -827,13 +827,14 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,  {  	unsigned int i;  	unsigned int ret; -	unsigned int nr_found; +	unsigned int nr_found, nr_skip;  	rcu_read_lock();  restart:  	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,  				(void ***)pages, NULL, start, nr_pages);  	ret = 0; +	nr_skip = 0;  	for (i = 0; i < nr_found; i++) {  		struct page *page;  repeat: @@ -856,6 +857,7 @@ repeat:  			 * here as an exceptional entry: so skip over it -  			 * we only reach this from invalidate_mapping_pages().  			 */ +			nr_skip++;  			continue;  		} @@ -876,7 +878,7 @@ repeat:  	 * If all entries were removed before we could secure them,  	 * try again, because callers stop trying once 0 is returned.  	 
*/ -	if (unlikely(!ret && nr_found)) +	if (unlikely(!ret && nr_found > nr_skip))  		goto restart;  	rcu_read_unlock();  	return ret; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ebd1e86bef1..3508777837c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -204,50 +204,6 @@ struct mem_cgroup_eventfd_list {  static void mem_cgroup_threshold(struct mem_cgroup *mem);  static void mem_cgroup_oom_notify(struct mem_cgroup *mem); -enum { -	SCAN_BY_LIMIT, -	SCAN_BY_SYSTEM, -	NR_SCAN_CONTEXT, -	SCAN_BY_SHRINK,	/* not recorded now */ -}; - -enum { -	SCAN, -	SCAN_ANON, -	SCAN_FILE, -	ROTATE, -	ROTATE_ANON, -	ROTATE_FILE, -	FREED, -	FREED_ANON, -	FREED_FILE, -	ELAPSED, -	NR_SCANSTATS, -}; - -struct scanstat { -	spinlock_t	lock; -	unsigned long	stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; -	unsigned long	rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; -}; - -const char *scanstat_string[NR_SCANSTATS] = { -	"scanned_pages", -	"scanned_anon_pages", -	"scanned_file_pages", -	"rotated_pages", -	"rotated_anon_pages", -	"rotated_file_pages", -	"freed_pages", -	"freed_anon_pages", -	"freed_file_pages", -	"elapsed_ns", -}; -#define SCANSTAT_WORD_LIMIT	"_by_limit" -#define SCANSTAT_WORD_SYSTEM	"_by_system" -#define SCANSTAT_WORD_HIERARCHY	"_under_hierarchy" - -  /*   * The memory controller data structure. The memory controller controls both   * page cache and RSS per cgroup. We would eventually like to provide @@ -313,8 +269,7 @@ struct mem_cgroup {  	/* For oom notifier event fd */  	struct list_head oom_notify; -	/* For recording LRU-scan statistics */ -	struct scanstat scanstat; +  	/*  	 * Should we move charges of a task when a task is moved into this  	 * mem_cgroup ? And what type of charges should we move ? @@ -1678,44 +1633,6 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)  }  #endif -static void __mem_cgroup_record_scanstat(unsigned long *stats, -			   struct memcg_scanrecord *rec) -{ - -	stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; -	stats[SCAN_ANON] += rec->nr_scanned[0]; -	stats[SCAN_FILE] += rec->nr_scanned[1]; - -	stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; -	stats[ROTATE_ANON] += rec->nr_rotated[0]; -	stats[ROTATE_FILE] += rec->nr_rotated[1]; - -	stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; -	stats[FREED_ANON] += rec->nr_freed[0]; -	stats[FREED_FILE] += rec->nr_freed[1]; - -	stats[ELAPSED] += rec->elapsed; -} - -static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) -{ -	struct mem_cgroup *mem; -	int context = rec->context; - -	if (context >= NR_SCAN_CONTEXT) -		return; - -	mem = rec->mem; -	spin_lock(&mem->scanstat.lock); -	__mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); -	spin_unlock(&mem->scanstat.lock); - -	mem = rec->root; -	spin_lock(&mem->scanstat.lock); -	__mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); -	spin_unlock(&mem->scanstat.lock); -} -  /*   * Scan the hierarchy if needed to reclaim memory. 
We remember the last child   * we reclaimed from, so that we don't end up penalizing one child extensively @@ -1740,9 +1657,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,  	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;  	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;  	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; -	struct memcg_scanrecord rec;  	unsigned long excess; -	unsigned long scanned; +	unsigned long nr_scanned;  	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; @@ -1750,15 +1666,6 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,  	if (!check_soft && !shrink && root_mem->memsw_is_minimum)  		noswap = true; -	if (shrink) -		rec.context = SCAN_BY_SHRINK; -	else if (check_soft) -		rec.context = SCAN_BY_SYSTEM; -	else -		rec.context = SCAN_BY_LIMIT; - -	rec.root = root_mem; -  	while (1) {  		victim = mem_cgroup_select_victim(root_mem);  		if (victim == root_mem) { @@ -1799,23 +1706,14 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,  			css_put(&victim->css);  			continue;  		} -		rec.mem = victim; -		rec.nr_scanned[0] = 0; -		rec.nr_scanned[1] = 0; -		rec.nr_rotated[0] = 0; -		rec.nr_rotated[1] = 0; -		rec.nr_freed[0] = 0; -		rec.nr_freed[1] = 0; -		rec.elapsed = 0;  		/* we use swappiness of local cgroup */  		if (check_soft) {  			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, -				noswap, zone, &rec, &scanned); -			*total_scanned += scanned; +				noswap, zone, &nr_scanned); +			*total_scanned += nr_scanned;  		} else  			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, -						noswap, &rec); -		mem_cgroup_record_scanstat(&rec); +						noswap);  		css_put(&victim->css);  		/*  		 * At shrinking usage, we can't check we should stop here or @@ -3854,18 +3752,14 @@ try_to_free:  	/* try to free all pages in this cgroup */  	shrink = 1;  	while (nr_retries && mem->res.usage > 0) { -		struct memcg_scanrecord rec;  		int progress;  		if (signal_pending(current)) {  			ret = -EINTR;  			goto out;  		} -		rec.context = SCAN_BY_SHRINK; -		rec.mem = mem; -		rec.root = mem;  		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, -						false, &rec); +						false);  		if (!progress) {  			nr_retries--;  			/* maybe some writeback is necessary */ @@ -4709,54 +4603,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)  }  #endif /* CONFIG_NUMA */ -static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, -				struct cftype *cft, -				struct cgroup_map_cb *cb) -{ -	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); -	char string[64]; -	int i; - -	for (i = 0; i < NR_SCANSTATS; i++) { -		strcpy(string, scanstat_string[i]); -		strcat(string, SCANSTAT_WORD_LIMIT); -		cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_LIMIT][i]); -	} - -	for (i = 0; i < NR_SCANSTATS; i++) { -		strcpy(string, scanstat_string[i]); -		strcat(string, SCANSTAT_WORD_SYSTEM); -		cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_SYSTEM][i]); -	} - -	for (i = 0; i < NR_SCANSTATS; i++) { -		strcpy(string, scanstat_string[i]); -		strcat(string, SCANSTAT_WORD_LIMIT); -		strcat(string, SCANSTAT_WORD_HIERARCHY); -		cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); -	} -	for (i = 0; i < NR_SCANSTATS; i++) { -		strcpy(string, scanstat_string[i]); -		strcat(string, SCANSTAT_WORD_SYSTEM); -		strcat(string, SCANSTAT_WORD_HIERARCHY); -		cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); -	} -	return 0; -} - -static int 
mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, -				unsigned int event) -{ -	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); - -	spin_lock(&mem->scanstat.lock); -	memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); -	memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); -	spin_unlock(&mem->scanstat.lock); -	return 0; -} - -  static struct cftype mem_cgroup_files[] = {  	{  		.name = "usage_in_bytes", @@ -4827,11 +4673,6 @@ static struct cftype mem_cgroup_files[] = {  		.mode = S_IRUGO,  	},  #endif -	{ -		.name = "vmscan_stat", -		.read_map = mem_cgroup_vmscan_stat_read, -		.trigger = mem_cgroup_reset_vmscan_stat, -	},  };  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -5095,7 +4936,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)  	atomic_set(&mem->refcnt, 1);  	mem->move_charge_at_immigrate = 0;  	mutex_init(&mem->thresholds_lock); -	spin_lock_init(&mem->scanstat.lock);  	return &mem->css;  free_out:  	__mem_cgroup_free(mem); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 8b57173c1dd..9c51f9f58ca 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,  	struct vm_area_struct *prev;  	struct vm_area_struct *vma;  	int err = 0; -	pgoff_t pgoff;  	unsigned long vmstart;  	unsigned long vmend; @@ -649,9 +648,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,  		vmstart = max(start, vma->vm_start);  		vmend   = min(end, vma->vm_end); -		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);  		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, -				  vma->anon_vma, vma->vm_file, pgoff, new_pol); +				  vma->anon_vma, vma->vm_file, vma->vm_pgoff, +				  new_pol);  		if (prev) {  			vma = prev;  			next = vma->vm_next; @@ -1412,7 +1411,9 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,  	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);  	if (!err && nmask) { -		err = copy_from_user(bm, nm, alloc_size); +		unsigned long copy_size; +		copy_size = min_t(unsigned long, sizeof(bm), alloc_size); +		err = copy_from_user(bm, nm, copy_size);  		/* ensure entire bitmap is zeroed */  		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);  		err |= compat_put_bitmap(nmask, bm, nr_bits); diff --git a/mm/slub.c b/mm/slub.c index 9f662d70eb4..7c54fe83a90 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,  		 */  		if (unlikely(!prior)) {  			remove_full(s, page); -			add_partial(n, page, 0); +			add_partial(n, page, 1);  			stat(s, FREE_ADD_PARTIAL);  		}  	} diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7ef0903058e..5016f19e166 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2140,6 +2140,14 @@ struct vm_struct *alloc_vm_area(size_t size)  		return NULL;  	} +	/* +	 * If the allocated address space is passed to a hypercall +	 * before being used then we cannot rely on a page fault to +	 * trigger an update of the page tables.  So sync all the page +	 * tables here. +	 */ +	vmalloc_sync_all(); +  	return area;  }  EXPORT_SYMBOL_GPL(alloc_vm_area); diff --git a/mm/vmscan.c b/mm/vmscan.c index b7719ec10dc..b55699cd906 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -105,7 +105,6 @@ struct scan_control {  	/* Which cgroup do we reclaim from */  	struct mem_cgroup *mem_cgroup; -	struct memcg_scanrecord *memcg_record;  	/*  	 * Nodemask of nodes allowed by the caller. 
If NULL, all nodes @@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,  			int file = is_file_lru(lru);  			int numpages = hpage_nr_pages(page);  			reclaim_stat->recent_rotated[file] += numpages; -			if (!scanning_global_lru(sc)) -				sc->memcg_record->nr_rotated[file] += numpages;  		}  		if (!pagevec_add(&pvec, page)) {  			spin_unlock_irq(&zone->lru_lock); @@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,  	reclaim_stat->recent_scanned[0] += *nr_anon;  	reclaim_stat->recent_scanned[1] += *nr_file; -	if (!scanning_global_lru(sc)) { -		sc->memcg_record->nr_scanned[0] += *nr_anon; -		sc->memcg_record->nr_scanned[1] += *nr_file; -	}  }  /* @@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,  		nr_reclaimed += shrink_page_list(&page_list, zone, sc);  	} -	if (!scanning_global_lru(sc)) -		sc->memcg_record->nr_freed[file] += nr_reclaimed; -  	local_irq_disable();  	if (current_is_kswapd())  		__count_vm_events(KSWAPD_STEAL, nr_reclaimed); @@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,  	}  	reclaim_stat->recent_scanned[file] += nr_taken; -	if (!scanning_global_lru(sc)) -		sc->memcg_record->nr_scanned[file] += nr_taken;  	__count_zone_vm_events(PGREFILL, zone, pgscanned);  	if (file) @@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,  	 * get_scan_ratio.  	 */  	reclaim_stat->recent_rotated[file] += nr_rotated; -	if (!scanning_global_lru(sc)) -		sc->memcg_record->nr_rotated[file] += nr_rotated;  	move_active_pages_to_lru(zone, &l_active,  						LRU_ACTIVE + file * LRU_FILE); @@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,  	u64 fraction[2], denominator;  	enum lru_list l;  	int noswap = 0; -	int force_scan = 0; +	bool force_scan = false;  	unsigned long nr_force_scan[2]; - -	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + -		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); -	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + -		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); - -	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { -		/* kswapd does zone balancing and need to scan this zone */ -		if (scanning_global_lru(sc) && current_is_kswapd()) -			force_scan = 1; -		/* memcg may have small limit and need to avoid priority drop */ -		if (!scanning_global_lru(sc)) -			force_scan = 1; -	} +	/* kswapd does zone balancing and needs to scan this zone */ +	if (scanning_global_lru(sc) && current_is_kswapd()) +		force_scan = true; +	/* memcg may have small limit and need to avoid priority drop */ +	if (!scanning_global_lru(sc)) +		force_scan = true;  	/* If we have no swap space, do not bother scanning anon pages. 
*/  	if (!sc->may_swap || (nr_swap_pages <= 0)) { @@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,  		goto out;  	} +	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + +		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); +	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + +		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); +  	if (scanning_global_lru(sc)) {  		free  = zone_page_state(zone, NR_FREE_PAGES);  		/* If we have very few page cache pages, @@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,  #ifdef CONFIG_CGROUP_MEM_RES_CTLR  unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, -					gfp_t gfp_mask, bool noswap, -					struct zone *zone, -					struct memcg_scanrecord *rec, -					unsigned long *scanned) +						gfp_t gfp_mask, bool noswap, +						struct zone *zone, +						unsigned long *nr_scanned)  {  	struct scan_control sc = {  		.nr_scanned = 0, @@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,  		.may_swap = !noswap,  		.order = 0,  		.mem_cgroup = mem, -		.memcg_record = rec,  	}; -	ktime_t start, end;  	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |  			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); @@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,  						      sc.may_writepage,  						      sc.gfp_mask); -	start = ktime_get();  	/*  	 * NOTE: Although we can get the priority field, using it  	 * here is not a good idea, since it limits the pages we can scan. @@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,  	 * the priority and make it zero.  	 */  	shrink_zone(0, zone, &sc); -	end = ktime_get(); - -	if (rec) -		rec->elapsed += ktime_to_ns(ktime_sub(end, start)); -	*scanned = sc.nr_scanned;  	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); +	*nr_scanned = sc.nr_scanned;  	return sc.nr_reclaimed;  }  unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,  					   gfp_t gfp_mask, -					   bool noswap, -					   struct memcg_scanrecord *rec) +					   bool noswap)  {  	struct zonelist *zonelist;  	unsigned long nr_reclaimed; -	ktime_t start, end;  	int nid;  	struct scan_control sc = {  		.may_writepage = !laptop_mode, @@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,  		.nr_to_reclaim = SWAP_CLUSTER_MAX,  		.order = 0,  		.mem_cgroup = mem_cont, -		.memcg_record = rec,  		.nodemask = NULL, /* we don't care the placement */  		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |  				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), @@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,  		.gfp_mask = sc.gfp_mask,  	}; -	start = ktime_get();  	/*  	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't  	 * take care of from where we get pages. 
So the node where we start the @@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,  					    sc.gfp_mask);  	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); -	end = ktime_get(); -	if (rec) -		rec->elapsed += ktime_to_ns(ktime_sub(end, start));  	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); diff --git a/mm/vmstat.c b/mm/vmstat.c index 20c18b7694b..d52b13d28e8 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,  }  #endif -#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)  #ifdef CONFIG_ZONE_DMA  #define TEXT_FOR_DMA(xx) xx "_dma",  #else @@ -788,7 +788,7 @@ const char * const vmstat_text[] = {  #endif /* CONFIG_VM_EVENTS_COUNTERS */  }; -#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */  #ifdef CONFIG_PROC_FS diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a40170e022e..7ef4eb4435f 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)  	if (status)  		return; -	if (test_bit(HCI_MGMT, &hdev->flags) && -				test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) +	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && +			test_bit(HCI_MGMT, &hdev->flags))  		mgmt_discovering(hdev->id, 0);  	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); @@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)  	if (status)  		return; -	if (test_bit(HCI_MGMT, &hdev->flags) && -				test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) +	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && +				test_bit(HCI_MGMT, &hdev->flags))  		mgmt_discovering(hdev->id, 0);  	hci_conn_check_pending(hdev); @@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)  		return;  	} -	if (test_bit(HCI_MGMT, &hdev->flags) && -					!test_and_set_bit(HCI_INQUIRY, -							&hdev->flags)) +	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && +				test_bit(HCI_MGMT, &hdev->flags))  		mgmt_discovering(hdev->id, 1);  } @@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff  	BT_DBG("%s status %d", hdev->name, status); -	if (test_bit(HCI_MGMT, &hdev->flags) && -				test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) +	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && +				test_bit(HCI_MGMT, &hdev->flags))  		mgmt_discovering(hdev->id, 0);  	hci_req_complete(hdev, HCI_OP_INQUIRY, status); diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index ba6f73eb06c..a9aff9c7d02 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -4,7 +4,7 @@  menuconfig BRIDGE_NF_EBTABLES  	tristate "Ethernet Bridge tables (ebtables) support" -	depends on BRIDGE && BRIDGE_NETFILTER +	depends on BRIDGE && NETFILTER  	select NETFILTER_XTABLES  	help  	  ebtables is a general, extensible frame/packet identification diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 7c2fa0a0814..7f9ac0742d1 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)  	caifdevs = caif_device_list(dev_net(dev));  	BUG_ON(!caifdevs); -	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); +	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);  	if 
(!caifd)  		return NULL;  	caifd->pcpu_refcnt = alloc_percpu(int); +	if (!caifd->pcpu_refcnt) { +		kfree(caifd); +		return NULL; +	}  	caifd->netdev = dev;  	dev_hold(dev);  	return caifd; diff --git a/net/can/af_can.c b/net/can/af_can.c index 8ce926d3b2c..9b0c32a2690 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -857,7 +857,7 @@ static __exit void can_exit(void)  	struct net_device *dev;  	if (stats_timer) -		del_timer(&can_stattimer); +		del_timer_sync(&can_stattimer);  	can_remove_proc(); diff --git a/net/core/dev.c b/net/core/dev.c index 17d67b579be..b10ff0a7185 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,   */  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)  { +	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { +		if (skb_copy_ubufs(skb, GFP_ATOMIC)) { +			atomic_long_inc(&dev->rx_dropped); +			kfree_skb(skb); +			return NET_RX_DROP; +		} +	} +  	skb_orphan(skb);  	nf_reset(skb); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index e7ab0c0285b..3231b468bb7 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)  		 */  		list_for_each_entry(r, &ops->rules_list, list) {  			if (r->action == FR_ACT_GOTO && -			    r->target == rule->pref) { -				BUG_ON(rtnl_dereference(r->ctarget) != NULL); +			    r->target == rule->pref && +			    rtnl_dereference(r->ctarget) == NULL) {  				rcu_assign_pointer(r->ctarget, rule);  				if (--ops->unresolved_rules == 0)  					break; diff --git a/net/core/flow.c b/net/core/flow.c index bf32c33cad3..555a456efb0 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -30,6 +30,7 @@ struct flow_cache_entry {  		struct hlist_node	hlist;  		struct list_head	gc_list;  	} u; +	struct net			*net;  	u16				family;  	u8				dir;  	u32				genid; @@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,  static u32 flow_hash_code(struct flow_cache *fc,  			  struct flow_cache_percpu *fcp, -			  const struct flowi *key) +			  const struct flowi *key, +			  size_t keysize)  {  	const u32 *k = (const u32 *) key; +	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); -	return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) +	return jhash2(k, length, fcp->hash_rnd)  		& (flow_cache_hash_size(fc) - 1);  } -typedef unsigned long flow_compare_t; -  /* I hear what you're saying, use memcmp.  But memcmp cannot make - * important assumptions that we can here, such as alignment and - * constant size. + * important assumptions that we can here, such as alignment.   
*/ -static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) +static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, +			    size_t keysize)  {  	const flow_compare_t *k1, *k1_lim, *k2; -	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); - -	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));  	k1 = (const flow_compare_t *) key1; -	k1_lim = k1 + n_elem; +	k1_lim = k1 + keysize;  	k2 = (const flow_compare_t *) key2; @@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,  	struct flow_cache_entry *fle, *tfle;  	struct hlist_node *entry;  	struct flow_cache_object *flo; +	size_t keysize;  	unsigned int hash;  	local_bh_disable(); @@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,  	fle = NULL;  	flo = NULL; + +	keysize = flow_key_size(family); +	if (!keysize) +		goto nocache; +  	/* Packet really early in init?  Making flow_cache_init a  	 * pre-smp initcall would solve this.  --RR */  	if (!fcp->hash_table) @@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,  	if (fcp->hash_rnd_recalc)  		flow_new_hash_rnd(fc, fcp); -	hash = flow_hash_code(fc, fcp, key); +	hash = flow_hash_code(fc, fcp, key, keysize);  	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { -		if (tfle->family == family && +		if (tfle->net == net && +		    tfle->family == family &&  		    tfle->dir == dir && -		    flow_key_compare(key, &tfle->key) == 0) { +		    flow_key_compare(key, &tfle->key, keysize) == 0) {  			fle = tfle;  			break;  		} @@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,  		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);  		if (fle) { +			fle->net = net;  			fle->family = family;  			fle->dir = dir; -			memcpy(&fle->key, key, sizeof(*key)); +			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));  			fle->object = NULL;  			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);  			fcp->hash_count++; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 27002dffe7e..387703f56fc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)  }  EXPORT_SYMBOL_GPL(skb_morph); -/* skb frags copy userspace buffers to kernel */ -static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) +/*	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel + *	@skb: the skb to modify + *	@gfp_mask: allocation priority + * + *	This must be called on SKBTX_DEV_ZEROCOPY skb. + *	It will copy all frags into kernel and drop the reference + *	to userspace pages. + * + *	If this function is called from an interrupt gfp_mask() must be + *	%GFP_ATOMIC. + * + *	Returns 0 on success or a negative error code on failure + *	to allocate kernel memory to copy to. 
+ */ +int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)  {  	int i;  	int num_frags = skb_shinfo(skb)->nr_frags; @@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)  		skb_shinfo(skb)->frags[i - 1].page = head;  		head = (struct page *)head->private;  	} + +	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;  	return 0;  } @@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)  	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {  		if (skb_copy_ubufs(skb, gfp_mask))  			return NULL; -		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;  	}  	n = skb + 1; @@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)  				n = NULL;  				goto out;  			} -			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;  		}  		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {  			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; @@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,  		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {  			if (skb_copy_ubufs(skb, gfp_mask))  				goto nofrags; -			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;  		}  		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)  			get_page(skb_shinfo(skb)->frags[i].page); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 27997d35ebd..a2468363978 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)  	dev->addr_len		= ETH_ALEN;  	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */  	dev->flags		= IFF_BROADCAST|IFF_MULTICAST; -	dev->priv_flags		= IFF_TX_SKB_SHARING; +	dev->priv_flags		|= IFF_TX_SKB_SHARING;  	memset(dev->broadcast, 0xFF, ETH_ALEN); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1b745d412cf..dd2b9478ddd 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)  		goto out;  	if (addr->sin_family != AF_INET) { +		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) +		 * only if s_addr is INADDR_ANY. 
+		 */  		err = -EAFNOSUPPORT; -		goto out; +		if (addr->sin_family != AF_UNSPEC || +		    addr->sin_addr.s_addr != htonl(INADDR_ANY)) +			goto out;  	}  	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 33e2c35b74b..80106d89d54 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {  };  /* Release a nexthop info record */ +static void free_fib_info_rcu(struct rcu_head *head) +{ +	struct fib_info *fi = container_of(head, struct fib_info, rcu); + +	if (fi->fib_metrics != (u32 *) dst_default_metrics) +		kfree(fi->fib_metrics); +	kfree(fi); +}  void free_fib_info(struct fib_info *fi)  { @@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)  	} endfor_nexthops(fi);  	fib_info_cnt--;  	release_net(fi->fib_net); -	kfree_rcu(fi, rcu); +	call_rcu(&fi->rcu, free_fib_info_rcu);  }  void fib_release_info(struct fib_info *fi) diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 5c9b9d96391..e59aabd0eae 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)  	return skb;  nlmsg_failure: +	kfree_skb(skb);  	*errp = -EINVAL;  	printk(KERN_ERR "ip_queue: error creating packet message\n");  	return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)  {  	struct nf_queue_entry *entry; -	if (vmsg->value > NF_MAX_VERDICT) +	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)  		return -EINVAL;  	entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,  		break;  	case IPQM_VERDICT: -		if (pmsg->msg.verdict.value > NF_MAX_VERDICT) -			status = -EINVAL; -		else -			status = ipq_set_verdict(&pmsg->msg.verdict, -						 len - sizeof(*pmsg)); -			break; +		status = ipq_set_verdict(&pmsg->msg.verdict, +					 len - sizeof(*pmsg)); +		break;  	default:  		status = -EINVAL;  	} diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b14ec7d03b6..4bfad5da94f 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {  	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),  	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),  	SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), +	SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), +	SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),  	SNMP_MIB_SENTINEL  }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ea0d2183df4..21fab3edb92 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,  		return 0;  	/* ...Then it's D-SACK, and must reside below snd_una completely */ -	if (!after(end_seq, tp->snd_una)) +	if (after(end_seq, tp->snd_una))  		return 0;  	if (!before(start_seq, tp->undo_marker)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1c12b8ec849..c34f0151394 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)  	kfree(inet_rsk(req)->opt);  } -static void syn_flood_warning(const struct sk_buff *skb) +/* + * Return 1 if a syncookie should be sent + */ +int tcp_syn_flood_action(struct sock *sk, +			 const struct sk_buff *skb, +			 
const char *proto)  { -	const char *msg; +	const char *msg = "Dropping request"; +	int want_cookie = 0; +	struct listen_sock *lopt; + +  #ifdef CONFIG_SYN_COOKIES -	if (sysctl_tcp_syncookies) +	if (sysctl_tcp_syncookies) {  		msg = "Sending cookies"; -	else +		want_cookie = 1; +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); +	} else  #endif -		msg = "Dropping request"; +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); -	pr_info("TCP: Possible SYN flooding on port %d. %s.\n", -				ntohs(tcp_hdr(skb)->dest), msg); +	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; +	if (!lopt->synflood_warned) { +		lopt->synflood_warned = 1; +		pr_info("%s: Possible SYN flooding on port %d. %s. " +			" Check SNMP counters.\n", +			proto, ntohs(tcp_hdr(skb)->dest), msg); +	} +	return want_cookie;  } +EXPORT_SYMBOL(tcp_syn_flood_action);  /*   * Save and compile IPv4 options into the request_sock if needed. @@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  	__be32 saddr = ip_hdr(skb)->saddr;  	__be32 daddr = ip_hdr(skb)->daddr;  	__u32 isn = TCP_SKB_CB(skb)->when; -#ifdef CONFIG_SYN_COOKIES  	int want_cookie = 0; -#else -#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ -#endif  	/* Never answer to SYNs send to broadcast or multicast */  	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  	 * evidently real one.  	 */  	if (inet_csk_reqsk_queue_is_full(sk) && !isn) { -		if (net_ratelimit()) -			syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES -		if (sysctl_tcp_syncookies) { -			want_cookie = 1; -		} else -#endif -		goto drop; +		want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); +		if (!want_cookie) +			goto drop;  	}  	/* Accept backlog is full. If we have already queued enough @@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  		while (l-- > 0)  			*c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES  		want_cookie = 0;	/* not our kind of cookie */ -#endif  		tmp_ext.cookie_out_never = 0; /* false */  		tmp_ext.cookie_plus = tmp_opt.cookie_plus;  	} else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f012ebd87b4..12368c58606 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)  			"%s(): cannot allocate memory for statistics; dev=%s.\n",  			__func__, dev->name));  		neigh_parms_release(&nd_tbl, ndev->nd_parms); -		ndev->dead = 1; -		in6_dev_finish_destroy(ndev); +		dev_put(dev); +		kfree(ndev);  		return NULL;  	} diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 9ef1831746e..b46e9f88ce3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)  	return 0;  } -int datagram_send_ctl(struct net *net, +int datagram_send_ctl(struct net *net, struct sock *sk,  		      struct msghdr *msg, struct flowi6 *fl6,  		      struct ipv6_txoptions *opt,  		      int *hlimit, int *tclass, int *dontfrag) @@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,  			if (addr_type != IPV6_ADDR_ANY) {  				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; -				if (!ipv6_chk_addr(net, &src_info->ipi6_addr, +				if (!inet_sk(sk)->transparent && +				    !ipv6_chk_addr(net, &src_info->ipi6_addr,  						   strict ? 
dev : NULL, 0))  					err = -EINVAL;  				else diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f3caf1b8d57..54303945019 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo  }  static struct ip6_flowlabel * -fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, -	  int optlen, int *err_p) +fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, +	  char __user *optval, int optlen, int *err_p)  {  	struct ip6_flowlabel *fl = NULL;  	int olen; @@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,  		msg.msg_control = (void*)(fl->opt+1);  		memset(&flowi6, 0, sizeof(flowi6)); -		err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, +		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,  					&junk, &junk);  		if (err)  			goto done; @@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)  		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)  			return -EINVAL; -		fl = fl_create(net, &freq, optval, optlen, &err); +		fl = fl_create(net, sk, &freq, optval, optlen, &err);  		if (fl == NULL)  			return err;  		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 147ede38ab4..2fbda5fc4cc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -475,7 +475,7 @@ sticky_done:  		msg.msg_controllen = optlen;  		msg.msg_control = (void*)(opt+1); -		retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, +		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,  					 &junk);  		if (retv)  			goto done; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 24939486328..e63c3972a73 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)  	return skb;  nlmsg_failure: +	kfree_skb(skb);  	*errp = -EINVAL;  	printk(KERN_ERR "ip6_queue: error creating packet message\n");  	return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)  {  	struct nf_queue_entry *entry; -	if (vmsg->value > NF_MAX_VERDICT) +	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)  		return -EINVAL;  	entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,  		break;  	case IPQM_VERDICT: -		if (pmsg->msg.verdict.value > NF_MAX_VERDICT) -			status = -EINVAL; -		else -			status = ipq_set_verdict(&pmsg->msg.verdict, -						 len - sizeof(*pmsg)); -			break; +		status = ipq_set_verdict(&pmsg->msg.verdict, +					 len - sizeof(*pmsg)); +		break;  	default:  		status = -EINVAL;  	} diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6a79f3081bd..343852e5c70 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(struct ipv6_txoptions); -		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, -					&tclass, &dontfrag); +		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					&hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9e69eb0ec6d..1250f902067 100644 --- a/net/ipv6/route.c +++ 
b/net/ipv6/route.c @@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)  	struct inet_peer *peer;  	u32 *p = NULL; +	if (!(rt->dst.flags & DST_HOST)) +		return NULL; +  	if (!rt->rt6i_peer)  		rt6_bind_peer(rt, 1); @@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)  	struct inet6_dev *idev = rt->rt6i_idev;  	struct inet_peer *peer = rt->rt6i_peer; +	if (!(rt->dst.flags & DST_HOST)) +		dst_destroy_metrics_generic(dst); +  	if (idev != NULL) {  		rt->rt6i_idev = NULL;  		in6_dev_put(idev); @@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,  			ipv6_addr_copy(&rt->rt6i_gateway, daddr);  		} -		rt->rt6i_dst.plen = 128;  		rt->rt6i_flags |= RTF_CACHE; -		rt->dst.flags |= DST_HOST;  #ifdef CONFIG_IPV6_SUBTREES  		if (rt->rt6i_src.plen && saddr) { @@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,  	struct rt6_info *rt = ip6_rt_copy(ort, daddr);  	if (rt) { -		rt->rt6i_dst.plen = 128;  		rt->rt6i_flags |= RTF_CACHE; -		rt->dst.flags |= DST_HOST;  		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));  	}  	return rt; @@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,  			neigh = NULL;  	} -	rt->rt6i_idev     = idev; +	rt->dst.flags |= DST_HOST; +	rt->dst.output  = ip6_output;  	dst_set_neighbour(&rt->dst, neigh);  	atomic_set(&rt->dst.__refcnt, 1); -	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);  	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); -	rt->dst.output  = ip6_output; + +	ipv6_addr_copy(&rt->rt6i_dst.addr, addr); +	rt->rt6i_dst.plen = 128; +	rt->rt6i_idev     = idev;  	spin_lock_bh(&icmp6_dst_lock);  	rt->dst.next = icmp6_dst_gc_list; @@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)  	if (rt->rt6i_dst.plen == 128)  	       rt->dst.flags |= DST_HOST; +	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { +		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); +		if (!metrics) { +			err = -ENOMEM; +			goto out; +		} +		dst_init_metrics(&rt->dst, metrics, 0); +	}  #ifdef CONFIG_IPV6_SUBTREES  	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);  	rt->rt6i_src.plen = cfg->fc_src_len; @@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,  	if (on_link)  		nrt->rt6i_flags &= ~RTF_GATEWAY; -	nrt->rt6i_dst.plen = 128; -	nrt->dst.flags |= DST_HOST; -  	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);  	dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); @@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,  	if (rt) {  		rt->dst.input = ort->dst.input;  		rt->dst.output = ort->dst.output; +		rt->dst.flags |= DST_HOST;  		ipv6_addr_copy(&rt->rt6i_dst.addr, dest); -		rt->rt6i_dst.plen = ort->rt6i_dst.plen; +		rt->rt6i_dst.plen = 128;  		dst_copy_metrics(&rt->dst, &ort->dst);  		rt->dst.error = ort->dst.error;  		rt->rt6i_idev = ort->rt6i_idev; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d1fb63f4aeb..3c9fa618b69 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,  	return tcp_v6_send_synack(sk, req, rvp);  } -static inline void syn_flood_warning(struct sk_buff *skb) -{ -#ifdef CONFIG_SYN_COOKIES -	if (sysctl_tcp_syncookies) -		printk(KERN_INFO -		       "TCPv6: Possible SYN flooding on port %d. 
" -		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); -	else -#endif -		printk(KERN_INFO -		       "TCPv6: Possible SYN flooding on port %d. " -		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); -} -  static void tcp_v6_reqsk_destructor(struct request_sock *req)  {  	kfree_skb(inet6_rsk(req)->pktopts); @@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)  	struct tcp_sock *tp = tcp_sk(sk);  	__u32 isn = TCP_SKB_CB(skb)->when;  	struct dst_entry *dst = NULL; -#ifdef CONFIG_SYN_COOKIES  	int want_cookie = 0; -#else -#define want_cookie 0 -#endif  	if (skb->protocol == htons(ETH_P_IP))  		return tcp_v4_conn_request(sk, skb); @@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)  		goto drop;  	if (inet_csk_reqsk_queue_is_full(sk) && !isn) { -		if (net_ratelimit()) -			syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES -		if (sysctl_tcp_syncookies) -			want_cookie = 1; -		else -#endif -		goto drop; +		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); +		if (!want_cookie) +			goto drop;  	}  	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) @@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)  		while (l-- > 0)  			*c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES  		want_cookie = 0;	/* not our kind of cookie */ -#endif  		tmp_ext.cookie_out_never = 0; /* false */  		tmp_ext.cookie_plus = tmp_opt.cookie_plus;  	} else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 29213b51c49..bb95e8e1c6f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1090,8 +1090,8 @@ do_udp_sendmsg:  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(*opt); -		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, -					&tclass, &dontfrag); +		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					&hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index d0b70dadf73..2615ffc8e78 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -40,9 +40,9 @@ extern int  sysctl_slot_timeout;  extern int  sysctl_fast_poll_increase;  extern char sysctl_devname[];  extern int  sysctl_max_baud_rate; -extern int  sysctl_min_tx_turn_time; -extern int  sysctl_max_tx_data_size; -extern int  sysctl_max_tx_window; +extern unsigned int sysctl_min_tx_turn_time; +extern unsigned int sysctl_max_tx_data_size; +extern unsigned int sysctl_max_tx_window;  extern int  sysctl_max_noreply_time;  extern int  sysctl_warn_noreply_time;  extern int  sysctl_lap_keepalive_time; diff --git a/net/irda/qos.c b/net/irda/qos.c index 1b51bcf4239..4369f7f41bc 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;   * Default is 10us which means using the unmodified value given by the   * peer except if it's 0 (0 is likely a bug in the other stack).   */ -unsigned sysctl_min_tx_turn_time = 10; +unsigned int sysctl_min_tx_turn_time = 10;  /*   * Maximum data size to be used in transmission in payload of LAP frame.   * There is a bit of confusion in the IrDA spec : @@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;   * bytes frames or all negotiated frame sizes, but you can use the sysctl   * to play with this value anyway.   * Jean II */ -unsigned sysctl_max_tx_data_size = 2042; +unsigned int sysctl_max_tx_data_size = 2042;  /*   * Maximum transmit window, i.e. 
number of LAP frames between turn-around.   * This allow to override what the peer told us. Some peers are buggy and   * don't always support what they tell us.   * Jean II */ -unsigned sysctl_max_tx_window = 7; +unsigned int sysctl_max_tx_window = 7;  static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);  static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 3db78b696c5..21070e9bc8d 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)  		BUG_ON(!sdata->bss);  		atomic_dec(&sdata->bss->num_sta_ps); -		__sta_info_clear_tim_bit(sdata->bss, sta); +		sta_info_clear_tim_bit(sta);  	}  	local->num_sta--; diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 2fd4565144d..31d56b23b9e 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,  		break;  	case PPTP_WAN_ERROR_NOTIFY: +	case PPTP_SET_LINK_INFO:  	case PPTP_ECHO_REQUEST:  	case PPTP_ECHO_REPLY:  		/* I don't have to explain these ;) */ diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 37bf94394be..8235b86b4e8 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,  			if (opsize < 2) /* "silly options" */  				return;  			if (opsize > length) -				break;	/* don't parse partial options */ +				return;	/* don't parse partial options */  			if (opcode == TCPOPT_SACK_PERM  			    && opsize == TCPOLEN_SACK_PERM) @@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,  	BUG_ON(ptr == NULL);  	/* Fast path for timestamp-only option */ -	if (length == TCPOLEN_TSTAMP_ALIGNED*4 +	if (length == TCPOLEN_TSTAMP_ALIGNED  	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)  				       | (TCPOPT_NOP << 16)  				       | (TCPOPT_TIMESTAMP << 8) @@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,  			if (opsize < 2) /* "silly options" */  				return;  			if (opsize > length) -				break;	/* don't parse partial options */ +				return;	/* don't parse partial options */  			if (opcode == TCPOPT_SACK  			    && opsize >= (TCPOLEN_SACK_BASE diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 00bd475eab4..a80b0cb03f1 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])  		return NULL;  	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); -	verdict = ntohl(vhdr->verdict); -	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) +	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; +	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)  		return NULL;  	return vhdr;  } diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index 76a083184d8..ed0db15ab00 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)  {  	struct xt_rateest_match_info *info = par->matchinfo;  	struct xt_rateest *est1, *est2; -	int ret = false; +	int ret = -EINVAL;  	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |  				     XT_RATEEST_MATCH_REL)) != 1) @@ -101,13 +101,12 @@ static int 
xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)  	if (!est1)  		goto err1; +	est2 = NULL;  	if (info->flags & XT_RATEEST_MATCH_REL) {  		est2 = xt_rateest_lookup(info->name2);  		if (!est2)  			goto err2; -	} else -		est2 = NULL; - +	}  	info->est1 = est1;  	info->est2 = est2; @@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)  err2:  	xt_rateest_put(est1);  err1: -	return -EINVAL; +	return ret;  }  static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index be4505ee67a..b01427924f8 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,  	struct rsvp_filter *f, **fp;  	struct rsvp_session *s, **sp;  	struct tc_rsvp_pinfo *pinfo = NULL; -	struct nlattr *opt = tca[TCA_OPTIONS-1]; +	struct nlattr *opt = tca[TCA_OPTIONS];  	struct nlattr *tb[TCA_RSVP_MAX + 1];  	struct tcf_exts e;  	unsigned int h1, h2; @@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,  	if (err < 0)  		return err; -	err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); +	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);  	if (err < 0)  		return err; @@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,  		if (f->handle != handle && handle)  			goto errout2; -		if (tb[TCA_RSVP_CLASSID-1]) { -			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); +		if (tb[TCA_RSVP_CLASSID]) { +			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);  			tcf_bind_filter(tp, &f->res, base);  		} @@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,  	err = -EINVAL;  	if (handle)  		goto errout2; -	if (tb[TCA_RSVP_DST-1] == NULL) +	if (tb[TCA_RSVP_DST] == NULL)  		goto errout2;  	err = -ENOBUFS; @@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,  		goto errout2;  	h2 = 16; -	if (tb[TCA_RSVP_SRC-1]) { -		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); +	if (tb[TCA_RSVP_SRC]) { +		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));  		h2 = hash_src(f->src);  	} -	if (tb[TCA_RSVP_PINFO-1]) { -		pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); +	if (tb[TCA_RSVP_PINFO]) { +		pinfo = nla_data(tb[TCA_RSVP_PINFO]);  		f->spi = pinfo->spi;  		f->tunnelhdr = pinfo->tunnelhdr;  	} -	if (tb[TCA_RSVP_CLASSID-1]) -		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); +	if (tb[TCA_RSVP_CLASSID]) +		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); -	dst = nla_data(tb[TCA_RSVP_DST-1]); +	dst = nla_data(tb[TCA_RSVP_DST]);  	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? 
pinfo->tunnelid : 0);  	err = -ENOMEM; @@ -642,8 +642,7 @@ nla_put_failure:  	return -1;  } -static struct tcf_proto_ops RSVP_OPS = { -	.next		=	NULL, +static struct tcf_proto_ops RSVP_OPS __read_mostly = {  	.kind		=	RSVP_ID,  	.classify	=	rsvp_classify,  	.init		=	rsvp_init, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 167c880cf8d..76388b083f2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,  		case SCTP_CMD_PURGE_ASCONF_QUEUE:  			sctp_asconf_queue_teardown(asoc);  			break; + +		case SCTP_CMD_SET_ASOC: +			asoc = cmd->obj.asoc; +			break; +  		default:  			pr_warn("Impossible command: %u, %p\n",  				cmd->verb, cmd->obj.ptr); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 49b847b00f9..a0f31e6c1c6 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,  	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));  	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); +	/* Restore association pointer to provide SCTP command interpeter +	 * with a valid context in case it needs to manipulate +	 * the queues */ +	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, +			 SCTP_ASOC((struct sctp_association *)asoc)); +  	return retval;  nomem: diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 02751dbc5a9..68a471ba193 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,  		return;  	} +	chan->beacon_found = false;  	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);  	chan->max_antenna_gain = min(chan->orig_mag,  		(int) MBI_TO_DBI(power_rule->max_antenna_gain)); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index b7b6ff8be55..dec0fa28372 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)  			     i++, j++)  				request->channels[i] =  					&wdev->wiphy->bands[band]->channels[j]; +			request->rates[band] = +				(1 << wdev->wiphy->bands[band]->n_bitrates) - 1;  		}  	}  	request->n_channels = n_channels; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index a026b0ef244..54a0dc2e2f8 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -212,6 +212,11 @@ resume:  		/* only the first xfrm gets the encap type */  		encap_type = 0; +		if (async && x->repl->check(x, skb, seq)) { +			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); +			goto drop_unlock; +		} +  		x->repl->advance(x, seq);  		x->curlft.bytes += skb->len; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 86d0caf91b3..62e90b862a0 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,  	snd_pcm_uframes_t avail = 0;  	long wait_time, tout; +	init_waitqueue_entry(&wait, current); +	set_current_state(TASK_INTERRUPTIBLE); +	add_wait_queue(&runtime->tsleep, &wait); +  	if (runtime->no_period_wakeup)  		wait_time = MAX_SCHEDULE_TIMEOUT;  	else { @@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,  		}  		wait_time = msecs_to_jiffies(wait_time * 1000);  	} -	init_waitqueue_entry(&wait, current); -	add_wait_queue(&runtime->tsleep, &wait); +  	for (;;) {  		if (signal_pending(current)) {  			err = -ERESTARTSYS;  			break;  		} + +		/* +		 * 
We need to check if space became available already +		 * (and thus the wakeup happened already) first to close +		 * the race of space already having become available. +		 * This check must happen after been added to the waitqueue +		 * and having current state be INTERRUPTIBLE. +		 */ +		if (is_playback) +			avail = snd_pcm_playback_avail(runtime); +		else +			avail = snd_pcm_capture_avail(runtime); +		if (avail >= runtime->twake) +			break;  		snd_pcm_stream_unlock_irq(substream); -		tout = schedule_timeout_interruptible(wait_time); + +		tout = schedule_timeout(wait_time); +  		snd_pcm_stream_lock_irq(substream); +		set_current_state(TASK_INTERRUPTIBLE);  		switch (runtime->status->state) {  		case SNDRV_PCM_STATE_SUSPENDED:  			err = -ESTRPIPE; @@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,  			err = -EIO;  			break;  		} -		if (is_playback) -			avail = snd_pcm_playback_avail(runtime); -		else -			avail = snd_pcm_capture_avail(runtime); -		if (avail >= runtime->twake) -			break;  	}   _endloop: +	set_current_state(TASK_RUNNING);  	remove_wait_queue(&runtime->tsleep, &wait);  	*availp = avail;  	return err; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3e7850c238c..f3aefef3721 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,  		return -1;  	}  	recursive++; -	for (i = 0; i < nums; i++) +	for (i = 0; i < nums; i++) { +		unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i])); +		if (type == AC_WID_PIN || type == AC_WID_AUD_OUT) +			continue;  		if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0)  			return i; +	}  	return -1;  }  EXPORT_SYMBOL_HDA(snd_hda_get_conn_index); diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index d6c93d92b55..c45f3e69bcf 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name,  		      int index, unsigned int pval, int dir,  		      struct snd_kcontrol **kctlp)  { -	char tmp[32]; +	char tmp[44];  	struct snd_kcontrol_new knew =  		HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);  	knew.private_value = pval; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7cabd731716..0503c999e7d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -168,7 +168,7 @@ struct alc_spec {  	unsigned int auto_mic_valid_imux:1;	/* valid imux for auto-mic */  	unsigned int automute:1;	/* HP automute enabled */  	unsigned int detect_line:1;	/* Line-out detection enabled */ -	unsigned int automute_lines:1;	/* automute line-out as well */ +	unsigned int automute_lines:1;	/* automute line-out as well; NOP when automute_hp_lo isn't set */  	unsigned int automute_hp_lo:1;	/* both HP and LO available */  	/* other flags */ @@ -551,7 +551,7 @@ static void update_speakers(struct hda_codec *codec)  	if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||  	    spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])  		return; -	if (!spec->automute_lines || !spec->automute) +	if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))  		on = 0;  	else  		on = spec->jack_present; @@ -803,7 +803,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,  	unsigned int val;  	if (!spec->automute)  		val = 0; -	else if (!spec->automute_lines) +	else if (!spec->automute_hp_lo || 
!spec->automute_lines)  		val = 1;  	else  		val = 2; @@ -824,7 +824,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,  		spec->automute = 0;  		break;  	case 1: -		if (spec->automute && !spec->automute_lines) +		if (spec->automute && +		    (!spec->automute_hp_lo || !spec->automute_lines))  			return 0;  		spec->automute = 1;  		spec->automute_lines = 0; diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 5145b663ef6..1b7c11432aa 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -6573,6 +6573,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {  	{ .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },  	{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },  	{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, +	{ .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},  	{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},  	{ .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},  	{ .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c index a118a0fb9d8..5956584ea3a 100644 --- a/sound/soc/blackfin/bf5xx-ad193x.c +++ b/sound/soc/blackfin/bf5xx-ad193x.c @@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {  		.cpu_dai_name = "bfin-tdm.0",  		.codec_dai_name ="ad193x-hifi",  		.platform_name = "bfin-tdm-pcm-audio", -		.codec_name = "ad193x.5", +		.codec_name = "spi0.5",  		.ops = &bf5xx_ad193x_ops,  	},  	{ @@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {  		.cpu_dai_name = "bfin-tdm.1",  		.codec_dai_name ="ad193x-hifi",  		.platform_name = "bfin-tdm-pcm-audio", -		.codec_name = "ad193x.5", +		.codec_name = "spi0.5",  		.ops = &bf5xx_ad193x_ops,  	},  }; diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c index fd0dc46afc3..5c6c2457386 100644 --- a/sound/soc/fsl/mpc5200_dma.c +++ b/sound/soc/fsl/mpc5200_dma.c @@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {  	.pcm_free	= &psc_dma_free,  }; -static int mpc5200_hpcd_probe(struct of_device *op) +static int mpc5200_hpcd_probe(struct platform_device *op)  {  	phys_addr_t fifo;  	struct psc_dma *psc_dma; @@ -487,7 +487,7 @@ out_unmap:  	return ret;  } -static int mpc5200_hpcd_remove(struct of_device *op) +static int mpc5200_hpcd_remove(struct platform_device *op)  {  	struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); @@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);  static struct platform_driver mpc5200_hpcd_of_driver = {  	.probe		= mpc5200_hpcd_probe,  	.remove		= mpc5200_hpcd_remove, -	.dev = { +	.driver = {  		.owner		= THIS_MODULE,  		.name		= "mpc5200-pcm-audio",  		.of_match_table    = mpc5200_hpcd_match, diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c index 309c59e6fb6..7945625e0e0 100644 --- a/sound/soc/imx/imx-pcm-fiq.c +++ b/sound/soc/imx/imx-pcm-fiq.c @@ -240,7 +240,6 @@ static int ssi_irq = 0;  static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)  { -	struct snd_card *card = rtd->card->snd_card;  	struct snd_soc_dai *dai = rtd->cpu_dai;  	struct snd_pcm *pcm = rtd->pcm;  	int ret; diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 8f16cd37c2a..d0bcf3fcea0 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -424,7 
+424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)  	if (!priv->mem) {  		dev_err(&pdev->dev, "request_mem_region failed\n");  		err = -EBUSY; -		goto error_alloc; +		goto err_alloc;  	}  	priv->io = ioremap(priv->mem->start, SZ_16K); diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index d9f8aded51f..20b7f3b003a 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)  		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);  		for (i = 0; i < rbnode->blklen; ++i) {  			regtmp = rbnode->base_reg + i; -			WARN_ON(codec->writable_register && -				codec->writable_register(codec, regtmp));  			val = snd_soc_rbtree_get_register(rbnode, i);  			def = snd_soc_get_cache_val(codec->reg_def_copy, i,  						    rbnode->word_size);  			if (val == def)  				continue; +			WARN_ON(!snd_soc_codec_writable_register(codec, regtmp)); +  			codec->cache_bypass = 1;  			ret = snd_soc_write(codec, regtmp, val);  			codec->cache_bypass = 0; @@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)  	lzo_blocks = codec->reg_cache;  	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { -		WARN_ON(codec->writable_register && -			codec->writable_register(codec, i)); +		WARN_ON(!snd_soc_codec_writable_register(codec, i));  		ret = snd_soc_cache_read(codec, i, &val);  		if (ret)  			return ret; @@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)  	codec_drv = codec->driver;  	for (i = 0; i < codec_drv->reg_cache_size; ++i) { -		WARN_ON(codec->writable_register && -			codec->writable_register(codec, i));  		ret = snd_soc_cache_read(codec, i, &val);  		if (ret)  			return ret; @@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)  			if (snd_soc_get_cache_val(codec->reg_def_copy,  						  i, codec_drv->reg_word_size) == val)  				continue; + +		WARN_ON(!snd_soc_codec_writable_register(codec, i)); +  		ret = snd_soc_write(codec, i, val);  		if (ret)  			return ret; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b085d8e8757..d2ef014af21 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1633,7 +1633,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec,  	if (codec->readable_register)  		return codec->readable_register(codec, reg);  	else -		return 0; +		return 1;  }  EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); @@ -1651,7 +1651,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec,  	if (codec->writable_register)  		return codec->writable_register(codec, reg);  	else -		return 0; +		return 1;  }  EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7e15914b363..d67c637557a 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);  /**   * snd_soc_dapm_free - free dapm resources - * @card: SoC device + * @dapm: DAPM context   *   * Free all dapm widgets and resources.   */ diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index 38b00131b2f..fa31d9c2abd 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)  	snd_soc_dapm_sync(dapm); -	snd_jack_report(jack->jack, status); +	snd_jack_report(jack->jack, jack->status);  out:  	mutex_unlock(&codec->mutex);  |
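
Editor's note: the ether_setup() hunk above (net/ethernet/eth.c) replaces "dev->priv_flags = IFF_TX_SKB_SHARING" with "dev->priv_flags |= IFF_TX_SKB_SHARING", so flags a caller set before ether_setup() are no longer silently discarded. A minimal userspace sketch of the difference follows; the numeric values and the IFF_EARLIER_FLAG name are invented for illustration and are not the kernel's definitions.

#include <stdio.h>

#define IFF_TX_SKB_SHARING	0x10000u	/* illustrative value only */
#define IFF_EARLIER_FLAG	0x00008u	/* hypothetical flag already set by the caller */

int main(void)
{
	unsigned int assigned = IFF_EARLIER_FLAG;
	unsigned int ored     = IFF_EARLIER_FLAG;

	assigned = IFF_TX_SKB_SHARING;	/* old pattern: the earlier flag is lost */
	ored    |= IFF_TX_SKB_SHARING;	/* patched pattern: both flags survive */

	printf("assigned=%#x ored=%#x\n", assigned, ored);
	return 0;
}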