746 files changed, 7329 insertions, 4517 deletions
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt index 27f2b21a9d5..d9ca5be9b47 100644 --- a/Documentation/atomic_ops.txt +++ b/Documentation/atomic_ops.txt @@ -253,6 +253,8 @@ This performs an atomic exchange operation on the atomic variable v, setting  the given new value.  It returns the old value that the atomic variable v had  just before the operation. +atomic_xchg requires explicit memory barriers around the operation. +  	int atomic_cmpxchg(atomic_t *v, int old, int new);  This performs an atomic compare exchange operation on the atomic value v, diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 728c38c242d..56fb62b09fc 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -141,3 +141,4 @@ Version History  1.2.0	Handle creation of arrays that contain failed devices.  1.3.0	Added support for RAID 10  1.3.1	Allow device replacement/rebuild for RAID 10 +1.3.2   Fix/improve redundancy checking for RAID10 diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt index 3a268127b05..bc50899e0c8 100644 --- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt @@ -81,7 +81,8 @@ PA31	TXD4  Required properties for pin configuration node:  - atmel,pins: 4 integers array, represents a group of pins mux and config    setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>. -  The PERIPH 0 means gpio. +  The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B... +  PIN_BANK 0 is pioA, PIN_BANK 1 is pioB...  Bits used for CONFIG:  PULL_UP		(1 << 0): indicate this pin need a pull up. @@ -126,7 +127,7 @@ pinctrl@fffff400 {  		pinctrl_dbgu: dbgu-0 {  			atmel,pins =  				<1 14 0x1 0x0	/* PB14 periph A */ -				 1 15 0x1 0x1>;	/* PB15 periph with pullup */ +				 1 15 0x1 0x1>;	/* PB15 periph A with pullup */  		};  	};  }; diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 8fbd8b46ee3..dcf338e62b7 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -175,9 +175,9 @@ consists of multiple segments as described below.                                              align with the zone size <-|                   |-> align with the segment size       _________________________________________________________________________ -    |            |            |    Node     |   Segment   |   Segment  |      | -    | Superblock | Checkpoint |   Address   |    Info.    |   Summary  | Main | -    |    (SB)    |   (CP)     | Table (NAT) | Table (SIT) | Area (SSA) |      | +    |            |            |   Segment   |    Node     |   Segment  |      | +    | Superblock | Checkpoint |    Info.    |   Address   |   Summary  | Main | +    |    (SB)    |   (CP)     | Table (SIT) | Table (NAT) | Area (SSA) |      |      |____________|_____2______|______N______|______N______|______N_____|__N___|                                                                         .      .                                                               .                . @@ -200,14 +200,14 @@ consists of multiple segments as described below.   : It contains file system information, bitmaps for valid NAT/SIT sets, orphan     inode lists, and summary entries of current active segments. 
-- Node Address Table (NAT) - : It is composed of a block address table for all the node blocks stored in -   Main area. -  - Segment Information Table (SIT)   : It contains segment information such as valid block count and bitmap for the     validity of all the blocks. +- Node Address Table (NAT) + : It is composed of a block address table for all the node blocks stored in +   Main area. +  - Segment Summary Area (SSA)   : It contains summary entries which contains the owner information of all the     data and node blocks stored in Main area. @@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are  valid, as shown as below.    +--------+----------+---------+ -  |   CP   |    NAT   |   SIT   | +  |   CP   |    SIT   |   NAT   |    +--------+----------+---------+    .         .          .          .    .            .              .              .    .               .                 .                 .    +-------+-------+--------+--------+--------+--------+ -  | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 | +  | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 |    +-------+-------+--------+--------+--------+--------+       |             ^                          ^       |             |                          | diff --git a/Documentation/hid/hid-sensor.txt b/Documentation/hid/hid-sensor.txt index 948b0989c43..948b0989c43 100755..100644 --- a/Documentation/hid/hid-sensor.txt +++ b/Documentation/hid/hid-sensor.txt diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 363e348bff9..6c723811c0a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.  			real-time workloads.  It can also improve energy  			efficiency for asymmetric multiprocessors. -	rcu_nocbs_poll	[KNL,BOOT] +	rcu_nocb_poll	[KNL,BOOT]  			Rather than requiring that offloaded CPUs  			(specified by rcu_nocbs= above) explicitly  			awaken the corresponding "rcuoN" kthreads, diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 3c4e1b3b80a..fa5d8a9ae20 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1685,6 +1685,7 @@ explicit lock operations, described later).  These include:  	xchg();  	cmpxchg(); +	atomic_xchg();  	atomic_cmpxchg();  	atomic_inc_return();  	atomic_dec_return(); diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index 406d82d5d2b..b443f1de0e5 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt @@ -57,6 +57,10 @@ Protocol 2.10:	(Kernel 2.6.31) Added a protocol for relaxed alignment  Protocol 2.11:	(Kernel 3.6) Added a field for offset of EFI handover  		protocol entry point. +Protocol 2.12:	(Kernel 3.8) Added the xloadflags field and extension fields +	 	to struct boot_params for for loading bzImage and ramdisk +		above 4G in 64bit. 
+  **** MEMORY LAYOUT  The traditional memory map for the kernel loader, used for Image or @@ -182,7 +186,7 @@ Offset	Proto	Name		Meaning  0230/4	2.05+	kernel_alignment Physical addr alignment required for kernel  0234/1	2.05+	relocatable_kernel Whether kernel is relocatable or not  0235/1	2.10+	min_alignment	Minimum alignment, as a power of two -0236/2	N/A	pad3		Unused +0236/2	2.12+	xloadflags	Boot protocol option flags  0238/4	2.06+	cmdline_size	Maximum size of the kernel command line  023C/4	2.07+	hardware_subarch Hardware subarchitecture  0240/8	2.07+	hardware_subarch_data Subarchitecture-specific data @@ -386,6 +390,7 @@ Protocol:	2.00+  	F  Special		(0xFF = undefined)         10  Reserved         11  Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de> +       12  OVMF UEFI virtualization stack    Please contact <hpa@zytor.com> if you need a bootloader ID    value assigned. @@ -582,6 +587,27 @@ Protocol:	2.10+    misaligned kernel.  Therefore, a loader should typically try each    power-of-two alignment from kernel_alignment down to this alignment. +Field name:     xloadflags +Type:           read +Offset/size:    0x236/2 +Protocol:       2.12+ + +  This field is a bitmask. + +  Bit 0 (read):	XLF_KERNEL_64 +	- If 1, this kernel has the legacy 64-bit entry point at 0x200. + +  Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G +        - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G. + +  Bit 2 (read):	XLF_EFI_HANDOVER_32 +	- If 1, the kernel supports the 32-bit EFI handoff entry point +          given at handover_offset. + +  Bit 3 (read): XLF_EFI_HANDOVER_64 +	- If 1, the kernel supports the 64-bit EFI handoff entry point +          given at handover_offset + 0x200. +  Field name:	cmdline_size  Type:		read  Offset/size:	0x238/4 diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt index cf5437deda8..199f453cb4d 100644 --- a/Documentation/x86/zero-page.txt +++ b/Documentation/x86/zero-page.txt @@ -19,6 +19,9 @@ Offset	Proto	Name		Meaning  090/010	ALL	hd1_info	hd1 disk parameter, OBSOLETE!!  
0A0/010	ALL	sys_desc_table	System description table (struct sys_desc_table)  0B0/010	ALL	olpc_ofw_header	OLPC's OpenFirmware CIF and friends +0C0/004	ALL	ext_ramdisk_image ramdisk_image high 32bits +0C4/004	ALL	ext_ramdisk_size  ramdisk_size high 32bits +0C8/004	ALL	ext_cmd_line_ptr  cmd_line_ptr high 32bits  140/080	ALL	edid_info	Video mode setup (struct edid_info)  1C0/020	ALL	efi_info	EFI 32 information (struct efi_info)  1E0/004	ALL	alk_mem_k	Alternative mem check, in KB @@ -27,6 +30,7 @@ Offset	Proto	Name		Meaning  1E9/001	ALL	eddbuf_entries	Number of entries in eddbuf (below)  1EA/001	ALL	edd_mbr_sig_buf_entries	Number of entries in edd_mbr_sig_buffer  				(below) +1EF/001	ALL	sentinel	Used to detect broken bootloaders  290/040	ALL	edd_mbr_sig_buffer EDD MBR signatures  2D0/A00	ALL	e820_map	E820 memory map table  				(array of struct e820entry) diff --git a/MAINTAINERS b/MAINTAINERS index 3105c4868c4..3b955649c32 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1303,7 +1303,7 @@ F:	include/linux/dmaengine.h  F:	include/linux/async_tx.h  AT24 EEPROM DRIVER -M:	Wolfram Sang <w.sang@pengutronix.de> +M:	Wolfram Sang <wsa@the-dreams.de>  L:	linux-i2c@vger.kernel.org  S:	Maintained  F:	drivers/misc/eeprom/at24.c @@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE  M:	Haavard Skinnemoen <hskinnemoen@gmail.com>  M:	Hans-Christian Egtvedt <egtvedt@samfundet.no>  W:	http://www.atmel.com/products/AVR32/ -W:	http://avr32linux.org/ +W:	http://mirror.egtvedt.no/avr32linux.org/  W:	http://avrfreaks.net/  S:	Maintained  F:	arch/avr32/ @@ -2966,7 +2966,7 @@ S:	Maintained  F:	drivers/net/ethernet/i825xx/eexpress.*  ETHERNET BRIDGE -M:	Stephen Hemminger <shemminger@vyatta.com> +M:	Stephen Hemminger <stephen@networkplumber.org>  L:	bridge@lists.linux-foundation.org  L:	netdev@vger.kernel.org  W:	http://www.linuxfoundation.org/en/Net:Bridge @@ -3757,12 +3757,11 @@ S:	Maintained  F:	drivers/i2c/i2c-stub.c  I2C SUBSYSTEM -M:	Wolfram Sang <w.sang@pengutronix.de> +M:	Wolfram Sang <wsa@the-dreams.de>  M:	"Ben Dooks (embedded platforms)" <ben-linux@fluff.org>  L:	linux-i2c@vger.kernel.org  W:	http://i2c.wiki.kernel.org/ -T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ -T:	git git://git.pengutronix.de/git/wsa/linux.git +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git  S:	Maintained  F:	Documentation/i2c/  F:	drivers/i2c/ @@ -4905,7 +4904,7 @@ S:	Maintained  MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)  M:	Mirko Lindner <mlindner@marvell.com> -M:	Stephen Hemminger <shemminger@vyatta.com> +M:	Stephen Hemminger <stephen@networkplumber.org>  L:	netdev@vger.kernel.org  S:	Maintained  F:	drivers/net/ethernet/marvell/sk* @@ -5180,7 +5179,7 @@ S:	Supported  F:	drivers/infiniband/hw/nes/  NETEM NETWORK EMULATOR -M:	Stephen Hemminger <shemminger@vyatta.com> +M:	Stephen Hemminger <stephen@networkplumber.org>  L:	netem@lists.linux-foundation.org  S:	Maintained  F:	net/sched/sch_netem.c @@ -5778,15 +5777,6 @@ L:	linux-i2c@vger.kernel.org  S:	Maintained  F:	drivers/i2c/muxes/i2c-mux-pca9541.c -PCA9564/PCA9665 I2C BUS DRIVER -M:	Wolfram Sang <w.sang@pengutronix.de> -L:	linux-i2c@vger.kernel.org -S:	Maintained -F:	drivers/i2c/algos/i2c-algo-pca.c -F:	drivers/i2c/busses/i2c-pca-* -F:	include/linux/i2c-algo-pca.h -F:	include/linux/i2c-pca-platform.h -  PCDP - PRIMARY CONSOLE AND DEBUG PORT  M:	Khalid Aziz <khalid@gonehiking.org>  S:	Maintained @@ -6585,7 +6575,7 @@ F:	drivers/media/platform/s3c-camif/  F:	include/media/s3c_camif.h  SERIAL DRIVERS -M:	Alan Cox <alan@linux.intel.com> +M:	Greg 
Kroah-Hartman <gregkh@linuxfoundation.org>  L:	linux-serial@vger.kernel.org  S:	Maintained  F:	drivers/tty/serial @@ -7088,7 +7078,7 @@ F:	include/uapi/sound/  F:	sound/  SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) -M:	Liam Girdwood <lrg@ti.com> +M:	Liam Girdwood <lgirdwood@gmail.com>  M:	Mark Brown <broonie@opensource.wolfsonmicro.com>  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git  L:	alsa-devel@alsa-project.org (moderated for non-subscribers) @@ -1,8 +1,8 @@  VERSION = 3  PATCHLEVEL = 8  SUBLEVEL = 0 -EXTRAVERSION = -rc4 -NAME = Terrified Chipmunk +EXTRAVERSION = +NAME = Unicycling Gorilla  # *DOCUMENTATION*  # To see a list of typical targets execute "make help" @@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \  				  -e s/arm.*/arm/ -e s/sa110/arm/ \  				  -e s/s390x/s390/ -e s/parisc64/parisc/ \  				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -				  -e s/sh[234].*/sh/ ) +				  -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )  # Cross compiling and selecting different set of gcc/bin-utils  # --------------------------------------------------------------------------- diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts index 00044026ef1..9b82facb256 100644 --- a/arch/arm/boot/dts/armada-370-db.dts +++ b/arch/arm/boot/dts/armada-370-db.dts @@ -26,7 +26,7 @@  	memory {  		device_type = "memory"; -		reg = <0x00000000 0x20000000>; /* 512 MB */ +		reg = <0x00000000 0x40000000>; /* 1 GB */  	};  	soc { diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index 271855a6e22..e041f42ed71 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -50,27 +50,25 @@  		};  		gpio0: gpio@d0018100 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018100 0x40>, -			    <0xd0018800 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018100 0x40>;  			ngpios = <32>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <16>, <17>, <18>, <19>; +			interrupts = <82>, <83>, <84>, <85>;  		};  		gpio1: gpio@d0018140 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018140 0x40>, -			    <0xd0018840 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018140 0x40>;  			ngpios = <17>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <20>, <21>, <22>; +			interrupts = <87>, <88>, <89>;  		};  	};  }; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 1c1937dbce7..9e23bd8c953 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -51,39 +51,36 @@  		};  		gpio0: gpio@d0018100 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018100 0x40>, -			    <0xd0018800 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018100 0x40>;  			ngpios = <32>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <16>, <17>, <18>, <19>; +			interrupts = <82>, <83>, <84>, <85>;  		};  		gpio1: gpio@d0018140 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018140 0x40>, -			    <0xd0018840 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018140 0x40>;  			ngpios = <32>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			
interrupts = <20>, <21>, <22>, <23>; +			interrupts = <87>, <88>, <89>, <90>;  		};  		gpio2: gpio@d0018180 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018180 0x40>, -			    <0xd0018870 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018180 0x40>;  			ngpios = <3>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <24>; +			interrupts = <91>;  		};  		ethernet@d0034000 { diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi index 4905cf3a5ef..965966110e3 100644 --- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi @@ -66,39 +66,36 @@  		};  		gpio0: gpio@d0018100 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018100 0x40>, -			    <0xd0018800 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018100 0x40>;  			ngpios = <32>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <16>, <17>, <18>, <19>; +			interrupts = <82>, <83>, <84>, <85>;  		};  		gpio1: gpio@d0018140 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018140 0x40>, -			    <0xd0018840 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018140 0x40>;  			ngpios = <32>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <20>, <21>, <22>, <23>; +			interrupts = <87>, <88>, <89>, <90>;  		};  		gpio2: gpio@d0018180 { -			compatible = "marvell,armadaxp-gpio"; -			reg = <0xd0018180 0x40>, -			    <0xd0018870 0x30>; +			compatible = "marvell,orion-gpio"; +			reg = <0xd0018180 0x40>;  			ngpios = <3>;  			gpio-controller;  			#gpio-cells = <2>;  			interrupt-controller;  			#interrupts-cells = <2>; -			interrupts = <24>; +			interrupts = <91>;  		};  		ethernet@d0034000 { diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi index e154f242c68..222047f1ece 100644 --- a/arch/arm/boot/dts/at91rm9200.dtsi +++ b/arch/arm/boot/dts/at91rm9200.dtsi @@ -336,8 +336,8 @@  	i2c@0 {  		compatible = "i2c-gpio"; -		gpios = <&pioA 23 0 /* sda */ -			 &pioA 24 0 /* scl */ +		gpios = <&pioA 25 0 /* sda */ +			 &pioA 26 0 /* scl */  			>;  		i2c-gpio,sda-open-drain;  		i2c-gpio,scl-open-drain; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 3a47cf95214..8ecca6948d8 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -143,6 +143,11 @@  						atmel,pins =  							<0 3 0x1 0x0>;	/* PA3 periph A */  					}; + +					pinctrl_usart0_sck: usart0_sck-0 { +						atmel,pins = +							<0 4 0x1 0x0>;	/* PA4 periph A */ +					};  				};  				usart1 { @@ -154,12 +159,17 @@  					pinctrl_usart1_rts: usart1_rts-0 {  						atmel,pins = -							<3 27 0x3 0x0>;	/* PC27 periph C */ +							<2 27 0x3 0x0>;	/* PC27 periph C */  					};  					pinctrl_usart1_cts: usart1_cts-0 {  						atmel,pins = -							<3 28 0x3 0x0>;	/* PC28 periph C */ +							<2 28 0x3 0x0>;	/* PC28 periph C */ +					}; + +					pinctrl_usart1_sck: usart1_sck-0 { +						atmel,pins = +							<2 28 0x3 0x0>;	/* PC29 periph C */  					};  				}; @@ -172,46 +182,56 @@  					pinctrl_uart2_rts: uart2_rts-0 {  						atmel,pins = -							<0 0 0x2 0x0>;	/* PB0 periph B */ +							<1 0 0x2 0x0>;	/* PB0 periph B */  					};  					pinctrl_uart2_cts: uart2_cts-0 {  						atmel,pins = -							<0 1 0x2 0x0>;	/* PB1 periph B */ +							<1 1 0x2 0x0>;	/* PB1 periph B */ +			
		}; + +					pinctrl_usart2_sck: usart2_sck-0 { +						atmel,pins = +							<1 2 0x2 0x0>;	/* PB2 periph B */  					};  				};  				usart3 {  					pinctrl_uart3: usart3-0 {  						atmel,pins = -							<3 23 0x2 0x1	/* PC22 periph B with pullup */ -							 3 23 0x2 0x0>;	/* PC23 periph B */ +							<2 23 0x2 0x1	/* PC22 periph B with pullup */ +							 2 23 0x2 0x0>;	/* PC23 periph B */  					};  					pinctrl_usart3_rts: usart3_rts-0 {  						atmel,pins = -							<3 24 0x2 0x0>;	/* PC24 periph B */ +							<2 24 0x2 0x0>;	/* PC24 periph B */  					};  					pinctrl_usart3_cts: usart3_cts-0 {  						atmel,pins = -							<3 25 0x2 0x0>;	/* PC25 periph B */ +							<2 25 0x2 0x0>;	/* PC25 periph B */ +					}; + +					pinctrl_usart3_sck: usart3_sck-0 { +						atmel,pins = +							<2 26 0x2 0x0>;	/* PC26 periph B */  					};  				};  				uart0 {  					pinctrl_uart0: uart0-0 {  						atmel,pins = -							<3 8 0x3 0x0	/* PC8 periph C */ -							 3 9 0x3 0x1>;	/* PC9 periph C with pullup */ +							<2 8 0x3 0x0	/* PC8 periph C */ +							 2 9 0x3 0x1>;	/* PC9 periph C with pullup */  					};  				};  				uart1 {  					pinctrl_uart1: uart1-0 {  						atmel,pins = -							<3 16 0x3 0x0	/* PC16 periph C */ -							 3 17 0x3 0x1>;	/* PC17 periph C with pullup */ +							<2 16 0x3 0x0	/* PC16 periph C */ +							 2 17 0x3 0x1>;	/* PC17 periph C with pullup */  					};  				}; @@ -240,14 +260,14 @@  					pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {  						atmel,pins = -							<1 8 0x1 0x0	/* PA8 periph A */ -							 1 11 0x1 0x0	/* PA11 periph A */ -							 1 12 0x1 0x0	/* PA12 periph A */ -							 1 13 0x1 0x0	/* PA13 periph A */ -							 1 14 0x1 0x0	/* PA14 periph A */ -							 1 15 0x1 0x0	/* PA15 periph A */ -							 1 16 0x1 0x0	/* PA16 periph A */ -							 1 17 0x1 0x0>;	/* PA17 periph A */ +							<1 8 0x1 0x0	/* PB8 periph A */ +							 1 11 0x1 0x0	/* PB11 periph A */ +							 1 12 0x1 0x0	/* PB12 periph A */ +							 1 13 0x1 0x0	/* PB13 periph A */ +							 1 14 0x1 0x0	/* PB14 periph A */ +							 1 15 0x1 0x0	/* PB15 periph A */ +							 1 16 0x1 0x0	/* PB16 periph A */ +							 1 17 0x1 0x0>;	/* PB17 periph A */  					};  				}; diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index fddd1741743..46c09801703 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ b/arch/arm/boot/dts/cros5250-common.dtsi @@ -96,8 +96,8 @@  		fifo-depth = <0x80>;  		card-detect-delay = <200>;  		samsung,dw-mshc-ciu-div = <3>; -		samsung,dw-mshc-sdr-timing = <2 3 3>; -		samsung,dw-mshc-ddr-timing = <1 2 3>; +		samsung,dw-mshc-sdr-timing = <2 3>; +		samsung,dw-mshc-ddr-timing = <1 2>;  		slot@0 {  			reg = <0>; @@ -120,8 +120,8 @@  		fifo-depth = <0x80>;  		card-detect-delay = <200>;  		samsung,dw-mshc-ciu-div = <3>; -		samsung,dw-mshc-sdr-timing = <2 3 3>; -		samsung,dw-mshc-ddr-timing = <1 2 3>; +		samsung,dw-mshc-sdr-timing = <2 3>; +		samsung,dw-mshc-ddr-timing = <1 2>;  		slot@0 {  			reg = <0>; @@ -141,8 +141,8 @@  		fifo-depth = <0x80>;  		card-detect-delay = <200>;  		samsung,dw-mshc-ciu-div = <3>; -		samsung,dw-mshc-sdr-timing = <2 3 3>; -		samsung,dw-mshc-ddr-timing = <1 2 3>; +		samsung,dw-mshc-sdr-timing = <2 3>; +		samsung,dw-mshc-ddr-timing = <1 2>;  		slot@0 {  			reg = <0>; diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts index fed7d3f9f43..cdee96fca6e 100644 --- a/arch/arm/boot/dts/dove-cubox.dts +++ b/arch/arm/boot/dts/dove-cubox.dts @@ -26,10 +26,15 @@  };  &uart0 { status = "okay"; }; -&sdio0 { status = "okay"; };  
&sata0 { status = "okay"; };  &i2c0 { status = "okay"; }; +&sdio0 { +	status = "okay"; +	/* sdio0 card detect is connected to wrong pin on CuBox */ +	cd-gpios = <&gpio0 12 1>; +}; +  &spi0 {  	status = "okay"; @@ -42,9 +47,14 @@  };  &pinctrl { -	pinctrl-0 = <&pmx_gpio_18>; +	pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;  	pinctrl-names = "default"; +	pmx_gpio_12: pmx-gpio-12 { +		marvell,pins = "mpp12"; +		marvell,function = "gpio"; +	}; +  	pmx_gpio_18: pmx-gpio-18 {  		marvell,pins = "mpp18";  		marvell,function = "gpio"; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 942d5761ca9..e05b18f3c33 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -115,8 +115,8 @@  		fifo-depth = <0x80>;  		card-detect-delay = <200>;  		samsung,dw-mshc-ciu-div = <3>; -		samsung,dw-mshc-sdr-timing = <2 3 3>; -		samsung,dw-mshc-ddr-timing = <1 2 3>; +		samsung,dw-mshc-sdr-timing = <2 3>; +		samsung,dw-mshc-ddr-timing = <1 2>;  		slot@0 {  			reg = <0>; @@ -139,8 +139,8 @@  		fifo-depth = <0x80>;  		card-detect-delay = <200>;  		samsung,dw-mshc-ciu-div = <3>; -		samsung,dw-mshc-sdr-timing = <2 3 3>; -		samsung,dw-mshc-ddr-timing = <1 2 3>; +		samsung,dw-mshc-sdr-timing = <2 3>; +		samsung,dw-mshc-ddr-timing = <1 2>;  		slot@0 {  			reg = <0>; diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi index 9bc6785ad22..77d21abfcdf 100644 --- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi +++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi @@ -1,4 +1,5 @@  /include/ "kirkwood.dtsi" +/include/ "kirkwood-6281.dtsi"  / {  	chosen { @@ -6,6 +7,21 @@  	};  	ocp@f1000000 { +		pinctrl: pinctrl@10000 { +			pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0 +					&pmx_ns2_sata0 &pmx_ns2_sata1>; +			pinctrl-names = "default"; + +			pmx_ns2_sata0: pmx-ns2-sata0 { +				marvell,pins = "mpp21"; +				marvell,function = "sata0"; +			}; +			pmx_ns2_sata1: pmx-ns2-sata1 { +				marvell,pins = "mpp20"; +				marvell,function = "sata1"; +			}; +		}; +  		serial@12000 {  			clock-frequency = <166666667>;  			status = "okay"; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index 110d6cbb795..d6ab442b701 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -36,6 +36,7 @@  			reg = <0x10100 0x40>;  			ngpios = <32>;  			interrupt-controller; +			#interrupt-cells = <2>;  			interrupts = <35>, <36>, <37>, <38>;  		}; @@ -46,6 +47,7 @@  			reg = <0x10140 0x40>;  			ngpios = <18>;  			interrupt-controller; +			#interrupt-cells = <2>;  			interrupts = <39>, <40>, <41>;  		}; diff --git a/arch/arm/boot/dts/kizbox.dts b/arch/arm/boot/dts/kizbox.dts index e8814fe0e27..b4dc3ed9a3e 100644 --- a/arch/arm/boot/dts/kizbox.dts +++ b/arch/arm/boot/dts/kizbox.dts @@ -48,6 +48,8 @@  			macb0: ethernet@fffc4000 {  				phy-mode = "mii"; +				pinctrl-0 = <&pinctrl_macb_rmii +				             &pinctrl_macb_rmii_mii_alt>;  				status = "okay";  			}; diff --git a/arch/arm/boot/dts/sunxi.dtsi b/arch/arm/boot/dts/sunxi.dtsi index 8bbc2bfef22..8b36abea9f2 100644 --- a/arch/arm/boot/dts/sunxi.dtsi +++ b/arch/arm/boot/dts/sunxi.dtsi @@ -60,19 +60,21 @@  		};  		uart0: uart@01c28000 { -			compatible = "ns8250"; +			compatible = "snps,dw-apb-uart";  			reg = <0x01c28000 0x400>;  			interrupts = <1>;  			reg-shift = <2>; +			reg-io-width = <4>;  			clock-frequency = <24000000>;  			status = "disabled";  		};  		uart1: uart@01c28400 { -			compatible = "ns8250"; +			
compatible = "snps,dw-apb-uart";  			reg = <0x01c28400 0x400>;  			interrupts = <2>;  			reg-shift = <2>; +			reg-io-width = <4>;  			clock-frequency = <24000000>;  			status = "disabled";  		}; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 1fc405a9ecf..cf8071ad22d 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -45,7 +45,6 @@  			reg = <1>;  		}; -/* A7s disabled till big.LITTLE patches are available...  		cpu2: cpu@2 {  			device_type = "cpu";  			compatible = "arm,cortex-a7"; @@ -63,7 +62,6 @@  			compatible = "arm,cortex-a7";  			reg = <0x102>;  		}; -*/  	};  	memory@80000000 { diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 36ae03a3f5d..87dfa9026c5 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)  	irq_set_chained_handler(irq, gic_handle_cascade_irq);  } +static u8 gic_get_cpumask(struct gic_chip_data *gic) +{ +	void __iomem *base = gic_data_dist_base(gic); +	u32 mask, i; + +	for (i = mask = 0; i < 32; i += 4) { +		mask = readl_relaxed(base + GIC_DIST_TARGET + i); +		mask |= mask >> 16; +		mask |= mask >> 8; +		if (mask) +			break; +	} + +	if (!mask) +		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); + +	return mask; +} +  static void __init gic_dist_init(struct gic_chip_data *gic)  {  	unsigned int i; @@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)  	/*  	 * Set all global interrupts to this CPU only.  	 */ -	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); +	cpumask = gic_get_cpumask(gic); +	cpumask |= cpumask << 8; +	cpumask |= cpumask << 16;  	for (i = 32; i < gic_irqs; i += 4)  		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)  	 * Get what the GIC says our CPU mask is.  	 */  	BUG_ON(cpu >= NR_GIC_CPU_IF); -	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); +	cpu_mask = gic_get_cpumask(gic);  	gic_cpu_map[cpu] = cpu_mask;  	/* diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index b175577d7ab..1ea959019fc 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -19,6 +19,7 @@ CONFIG_SOC_AT91SAM9260=y  CONFIG_SOC_AT91SAM9263=y  CONFIG_SOC_AT91SAM9G45=y  CONFIG_SOC_AT91SAM9X5=y +CONFIG_SOC_AT91SAM9N12=y  CONFIG_MACH_AT91SAM_DT=y  CONFIG_AT91_PROGRAMMABLE_CLOCKS=y  CONFIG_AT91_TIMER_HZ=128 @@ -31,7 +32,7 @@ CONFIG_ZBOOT_ROM_TEXT=0x0  CONFIG_ZBOOT_ROM_BSS=0x0  CONFIG_ARM_APPENDED_DTB=y  CONFIG_ARM_ATAG_DTB_COMPAT=y -CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" +CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"  CONFIG_KEXEC=y  CONFIG_AUTO_ZRELADDR=y  # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981..1c4df27f933 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@   */  #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)  #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)  /*   * The maximum size of a 26-bit user space task. 
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 4eb6d005ffa..86dff32a073 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -7,8 +7,14 @@  #ifndef __ASSEMBLER__  unsigned int scu_get_core_count(void __iomem *); -void scu_enable(void __iomem *);  int scu_power_mode(void __iomem *, unsigned int); + +#ifdef CONFIG_SMP +void scu_enable(void __iomem *scu_base); +#else +static inline void scu_enable(void __iomem *scu_base) {} +#endif +  #endif  #endif diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 6809200c31f..14f7c3b1463 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -100,12 +100,14 @@ ENTRY(printch)  		b	1b  ENDPROC(printch) +#ifdef CONFIG_MMU  ENTRY(debug_ll_addr)  		addruart r2, r3, ip  		str	r2, [r0]  		str	r3, [r1]  		mov	pc, lr  ENDPROC(debug_ll_addr) +#endif  #else diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 4eee351f466..486a15ae901 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -246,6 +246,7 @@ __create_page_tables:  	/*  	 * Then map boot params address in r2 if specified. +	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.  	 */  	mov	r0, r2, lsr #SECTION_SHIFT  	movs	r0, r0, lsl #SECTION_SHIFT @@ -253,6 +254,8 @@ __create_page_tables:  	addne	r3, r3, #PAGE_OFFSET  	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)  	orrne	r6, r7, r0 +	strne	r6, [r3], #1 << PMD_ORDER +	addne	r6, r6, #1 << SECTION_SHIFT  	strne	r6, [r3]  #ifdef CONFIG_DEBUG_LL @@ -331,7 +334,7 @@ ENTRY(secondary_startup)  	 * as it has already been validated by the primary processor.  	 */  #ifdef CONFIG_ARM_VIRT_EXT -	bl	__hyp_stub_install +	bl	__hyp_stub_install_secondary  #endif  	safe_svcmode_maskall r9 diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 65b2417aebc..1315c4ccfa5 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)  	 * immediately.  	 */  	compare_cpu_mode_with_primary	r4, r5, r6, r7 -	bxne	lr +	movne	pc, lr  	/*  	 * Once we have given up on one CPU, we do not try to install the @@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)  	 */  	cmp	r4, #HYP_MODE -	bxne	lr			@ give up if the CPU is not in HYP mode +	movne	pc, lr			@ give up if the CPU is not in HYP mode  /*   * Configure HSCTLR to set correct exception endianness/instruction set @@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)   * Eventually, CPU-specific code might be needed -- assume not for now   *   * This code relies on the "eret" instruction to synchronize the - * various coprocessor accesses. + * various coprocessor accesses. This is done when we switch to SVC + * (see safe_svcmode_maskall).   */  	@ Now install the hypervisor stub:  	adr	r7, __hyp_stub_vectors @@ -155,14 +156,7 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE  1:  #endif -	bic	r7, r4, #MODE_MASK -	orr	r7, r7, #SVC_MODE -THUMB(	orr	r7, r7, #PSR_T_BIT	) -	msr	spsr_cxsf, r7		@ This is SPSR_hyp. - -	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr -	__ERET				@ return, switching to SVC mode -					@ The boot CPU mode is left in r4. +	bx	lr			@ The boot CPU mode is left in r4.  
ENDPROC(__hyp_stub_install_secondary)  __hyp_stub_do_trap: @@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)  	@ fall through  ENTRY(__hyp_set_vectors)  	__HVC(0) -	bx	lr +	mov	pc, lr  ENDPROC(__hyp_set_vectors)  #ifndef ZIMAGE diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index b9f015e843d..45eac87ed66 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)  int scu_power_mode(void __iomem *scu_base, unsigned int mode)  {  	unsigned int val; -	int cpu = cpu_logical_map(smp_processor_id()); +	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);  	if (mode > 3 || mode == 1 || cpu > 3)  		return -EINVAL; diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index 9ee866ce047..4b678478cf9 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -105,6 +105,8 @@ static void __init soc_detect(u32 dbgu_base)  	switch (socid) {  	case ARCH_ID_AT91RM9200:  		at91_soc_initdata.type = AT91_SOC_RM9200; +		if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE) +			at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;  		at91_boot_soc = at91rm9200_soc;  		break; diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e103c290bc9..85afb031b67 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT  	select CPU_EXYNOS4210  	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD  	select PINCTRL -	select PINCTRL_EXYNOS4 +	select PINCTRL_EXYNOS  	select USE_OF  	help  	  Machine support for Samsung Exynos4 machine with device tree enabled. diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 981dc1e1da5..e6c06128293 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -28,6 +28,7 @@  #include <asm/arch_timer.h>  #include <asm/cacheflush.h> +#include <asm/cputype.h>  #include <asm/smp_plat.h>  #include <asm/smp_twd.h>  #include <asm/hardware/arm_timer.h> @@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)  void highbank_set_cpu_jump(int cpu, void *jump_addr)  { -	cpu = cpu_logical_map(cpu); +	cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);  	writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));  	__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);  	outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h index 70af9d13fce..5995df7f262 100644 --- a/arch/arm/mach-highbank/sysregs.h +++ b/arch/arm/mach-highbank/sysregs.h @@ -37,7 +37,7 @@ extern void __iomem *sregs_base;  static inline void highbank_set_core_pwr(void)  { -	int cpu = cpu_logical_map(smp_processor_id()); +	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);  	if (scu_base_addr)  		scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);  	else @@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)  static inline void highbank_clear_core_pwr(void)  { -	int cpu = cpu_logical_map(smp_processor_id()); +	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);  	if (scu_base_addr)  		scu_power_mode(scu_base_addr, SCU_PM_NORMAL);  	else diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 3e628fd7a67..0a2349dc701 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -851,6 +851,7 @@ config SOC_IMX6Q  	select HAVE_CAN_FLEXCAN if CAN  	select HAVE_IMX_GPC  	select HAVE_IMX_MMDC +	select HAVE_IMX_SRC  	
select HAVE_SMP  	select MFD_SYSCON  	select PINCTRL diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c index b197aa73dc4..2c570cdaae7 100644 --- a/arch/arm/mach-imx/clk-imx25.c +++ b/arch/arm/mach-imx/clk-imx25.c @@ -254,9 +254,9 @@ int __init mx25_clocks_init(void)  	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");  	clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");  	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); -	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); +	clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); +	clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27"); +	clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");  	clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0");  	/* i.mx25 has the i.mx35 type cspi */  	clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0"); diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c index 4c1d1e4efc7..1ffe3b534e5 100644 --- a/arch/arm/mach-imx/clk-imx27.c +++ b/arch/arm/mach-imx/clk-imx27.c @@ -236,9 +236,9 @@ int __init mx27_clocks_init(unsigned long fref)  	clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0");  	clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0");  	clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0"); -	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); +	clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); +	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27"); +	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27");  	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");  	clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");  	clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0"); diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c index 8be64e0a4ac..16ccbd41dea 100644 --- a/arch/arm/mach-imx/clk-imx31.c +++ b/arch/arm/mach-imx/clk-imx31.c @@ -139,9 +139,9 @@ int __init mx31_clocks_init(unsigned long fref)  	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");  	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");  	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); -	clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc"); -	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); +	clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27"); +	clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27"); +	clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");  	clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");  	/* i.mx31 has the i.mx21 type uart */  	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index 66f3d65ea27..f0727e80815 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -251,9 +251,9 @@ int __init mx35_clocks_init()  	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");  	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");  	clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2"); -	clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); -	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc"); +	
clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); +	clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); +	clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27");  	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");  	clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");  	clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c index 579023f59dc..fb7cb841b64 100644 --- a/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/arch/arm/mach-imx/clk-imx51-imx53.c @@ -269,9 +269,9 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,  	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");  	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");  	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2"); -	clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc"); -	clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc"); +	clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51"); +	clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51"); +	clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51");  	clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand");  	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");  	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1"); diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 7f2c10c7413..c0c4e723b7f 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -436,6 +436,9 @@ int __init mx6q_clocks_init(void)  	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)  		clk_prepare_enable(clk[clks_init_on[i]]); +	/* Set initial power mode */ +	imx6q_set_lpm(WAIT_CLOCKED); +  	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");  	base = of_iomap(np, 0);  	WARN_ON(!base); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 7191ab4434e..fa36fb84ab1 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -142,6 +142,7 @@ extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);  extern void imx6q_clock_map_io(void);  extern void imx_cpu_die(unsigned int cpu); +extern int imx_cpu_kill(unsigned int cpu);  #ifdef CONFIG_PM  extern void imx6q_pm_init(void); diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h index 6277baf1b7b..9bd5777ff0e 100644 --- a/arch/arm/mach-imx/devices/devices-common.h +++ b/arch/arm/mach-imx/devices/devices-common.h @@ -63,6 +63,7 @@ struct platform_device *__init imx_add_flexcan(  #include <linux/fsl_devices.h>  struct imx_fsl_usb2_udc_data { +	const char *devid;  	resource_size_t iobase;  	resource_size_t irq;  }; diff --git a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c index 37e44398197..3c06bd96e9c 100644 --- a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c +++ b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c @@ -11,35 +11,36 @@  #include "../hardware.h"  #include "devices-common.h" -#define imx_fsl_usb2_udc_data_entry_single(soc)				\ +#define imx_fsl_usb2_udc_data_entry_single(soc, _devid)			\  	{								\ +		.devid = _devid,					\  		.iobase = soc ## _USB_OTG_BASE_ADDR,			\  		.irq = soc ## _INT_USB_OTG,				\  	}  #ifdef CONFIG_SOC_IMX25  const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst = -	imx_fsl_usb2_udc_data_entry_single(MX25); +	imx_fsl_usb2_udc_data_entry_single(MX25, 
"imx-udc-mx27");  #endif /* ifdef CONFIG_SOC_IMX25 */  #ifdef CONFIG_SOC_IMX27  const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst = -	imx_fsl_usb2_udc_data_entry_single(MX27); +	imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27");  #endif /* ifdef CONFIG_SOC_IMX27 */  #ifdef CONFIG_SOC_IMX31  const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst = -	imx_fsl_usb2_udc_data_entry_single(MX31); +	imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27");  #endif /* ifdef CONFIG_SOC_IMX31 */  #ifdef CONFIG_SOC_IMX35  const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst = -	imx_fsl_usb2_udc_data_entry_single(MX35); +	imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27");  #endif /* ifdef CONFIG_SOC_IMX35 */  #ifdef CONFIG_SOC_IMX51  const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst = -	imx_fsl_usb2_udc_data_entry_single(MX51); +	imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51");  #endif  struct platform_device *__init imx_add_fsl_usb2_udc( @@ -57,7 +58,7 @@ struct platform_device *__init imx_add_fsl_usb2_udc(  			.flags = IORESOURCE_IRQ,  		},  	}; -	return imx_add_platform_device_dmamask("fsl-usb2-udc", -1, +	return imx_add_platform_device_dmamask(data->devid, -1,  			res, ARRAY_SIZE(res),  			pdata, sizeof(*pdata), DMA_BIT_MASK(32));  } diff --git a/arch/arm/mach-imx/devices/platform-imx-fb.c b/arch/arm/mach-imx/devices/platform-imx-fb.c index 10b0ed39f07..25a47c616b2 100644 --- a/arch/arm/mach-imx/devices/platform-imx-fb.c +++ b/arch/arm/mach-imx/devices/platform-imx-fb.c @@ -54,7 +54,7 @@ struct platform_device *__init imx_add_imx_fb(  			.flags = IORESOURCE_IRQ,  		},  	}; -	return imx_add_platform_device_dmamask("imx-fb", 0, +	return imx_add_platform_device_dmamask(data->devid, 0,  			res, ARRAY_SIZE(res),  			pdata, sizeof(*pdata), DMA_BIT_MASK(32));  } diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 3dec962b077..7bc5fe15dda 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,9 +46,11 @@ static inline void cpu_enter_lowpower(void)  void imx_cpu_die(unsigned int cpu)  {  	cpu_enter_lowpower(); -	imx_enable_cpu(cpu, false); +	cpu_do_idle(); +} -	/* spin here until hardware takes it down */ -	while (1) -		; +int imx_cpu_kill(unsigned int cpu) +{ +	imx_enable_cpu(cpu, false); +	return 1;  } diff --git a/arch/arm/mach-imx/iram_alloc.c b/arch/arm/mach-imx/iram_alloc.c index 6c80424f678..e05cf407db6 100644 --- a/arch/arm/mach-imx/iram_alloc.c +++ b/arch/arm/mach-imx/iram_alloc.c @@ -22,8 +22,7 @@  #include <linux/module.h>  #include <linux/spinlock.h>  #include <linux/genalloc.h> - -#include "iram.h" +#include "linux/platform_data/imx-iram.h"  static unsigned long iram_phys_base;  static void __iomem *iram_virt_base; diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c index 3777b805b76..66fae885c84 100644 --- a/arch/arm/mach-imx/platsmp.c +++ b/arch/arm/mach-imx/platsmp.c @@ -92,5 +92,6 @@ struct smp_operations  imx_smp_ops __initdata = {  	.smp_boot_secondary	= imx_boot_secondary,  #ifdef CONFIG_HOTPLUG_CPU  	.cpu_die		= imx_cpu_die, +	.cpu_kill		= imx_cpu_kill,  #endif  }; diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index a17543da602..ee42d20cba1 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c @@ -41,6 +41,7 @@ static int imx6q_pm_enter(suspend_state_t state)  		cpu_suspend(0, imx6q_suspend_finish);  		imx_smp_prepare();  		imx_gpc_post_resume(); +		
imx6q_set_lpm(WAIT_CLOCKED);  		break;  	default:  		return -EINVAL; diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index be50e795536..e7fcea7f330 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -475,13 +475,12 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)  {  	int ret = 0; +	if (!ap_syscon_base) +		return -EINVAL; +  	if (nr == 0) {  		sys->mem_offset = PHYS_PCI_MEM_BASE;  		ret = pci_v3_setup_resources(sys); -		/* Remap the Integrator system controller */ -		ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); -		if (!ap_syscon_base) -			return -EINVAL;  	}  	return ret; @@ -497,6 +496,13 @@ void __init pci_v3_preinit(void)  	unsigned int temp;  	int ret; +	/* Remap the Integrator system controller */ +	ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); +	if (!ap_syscon_base) { +		pr_err("unable to remap the AP syscon for PCIv3\n"); +		return; +	} +  	pcibios_min_mem = 0x00100000;  	/* diff --git a/arch/arm/mach-kirkwood/board-ns2.c b/arch/arm/mach-kirkwood/board-ns2.c index 8821720ab5a..f4632a809f6 100644 --- a/arch/arm/mach-kirkwood/board-ns2.c +++ b/arch/arm/mach-kirkwood/board-ns2.c @@ -18,47 +18,11 @@  #include <linux/gpio.h>  #include <linux/of.h>  #include "common.h" -#include "mpp.h"  static struct mv643xx_eth_platform_data ns2_ge00_data = {  	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),  }; -static unsigned int ns2_mpp_config[] __initdata = { -	MPP0_SPI_SCn, -	MPP1_SPI_MOSI, -	MPP2_SPI_SCK, -	MPP3_SPI_MISO, -	MPP4_NF_IO6, -	MPP5_NF_IO7, -	MPP6_SYSRST_OUTn, -	MPP7_GPO,		/* Fan speed (bit 1) */ -	MPP8_TW0_SDA, -	MPP9_TW0_SCK, -	MPP10_UART0_TXD, -	MPP11_UART0_RXD, -	MPP12_GPO,		/* Red led */ -	MPP14_GPIO,		/* USB fuse */ -	MPP16_GPIO,		/* SATA 0 power */ -	MPP17_GPIO,		/* SATA 1 power */ -	MPP18_NF_IO0, -	MPP19_NF_IO1, -	MPP20_SATA1_ACTn, -	MPP21_SATA0_ACTn, -	MPP22_GPIO,		/* Fan speed (bit 0) */ -	MPP23_GPIO,		/* Fan power */ -	MPP24_GPIO,		/* USB mode select */ -	MPP25_GPIO,		/* Fan rotation fail */ -	MPP26_GPIO,		/* USB device vbus */ -	MPP28_GPIO,		/* USB enable host vbus */ -	MPP29_GPIO,		/* Blue led (slow register) */ -	MPP30_GPIO,		/* Blue led (command register) */ -	MPP31_GPIO,		/* Board power off */ -	MPP32_GPIO,		/* Power button (0 = Released, 1 = Pushed) */ -	MPP33_GPO,		/* Fan speed (bit 2) */ -	0 -}; -  #define NS2_GPIO_POWER_OFF	31  static void ns2_power_off(void) @@ -71,8 +35,6 @@ void __init ns2_init(void)  	/*  	 * Basic setup. Needs to be called early.  	 
*/ -	kirkwood_mpp_conf(ns2_mpp_config); -  	if (of_machine_is_compatible("lacie,netspace_lite_v2") ||  	    of_machine_is_compatible("lacie,netspace_mini_v2"))  		ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile index 5dcb369b58a..99df4df680f 100644 --- a/arch/arm/mach-mvebu/Makefile +++ b/arch/arm/mach-mvebu/Makefile @@ -1,6 +1,8 @@  ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \  	-I$(srctree)/arch/arm/plat-orion/include +AFLAGS_coherency_ll.o		:= -Wa,-march=armv7-a +  obj-y += system-controller.o  obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o  obj-$(CONFIG_SMP)                += platsmp.o headsmp.o diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 5c8e9cee2c2..769c1feee1c 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -397,6 +397,12 @@ static struct omap_board_mux board_mux[] __initdata = {  		  OMAP_PULL_ENA),  	OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), +	/* UART2 - BT/FM/GPS shared transport */ +	OMAP4_MUX(UART2_CTS,	OMAP_PIN_INPUT	| OMAP_MUX_MODE0), +	OMAP4_MUX(UART2_RTS,	OMAP_PIN_OUTPUT	| OMAP_MUX_MODE0), +	OMAP4_MUX(UART2_RX,	OMAP_PIN_INPUT	| OMAP_MUX_MODE0), +	OMAP4_MUX(UART2_TX,	OMAP_PIN_OUTPUT	| OMAP_MUX_MODE0), +  	{ .reg_offset = OMAP_MUX_TERMINATOR },  }; diff --git a/arch/arm/mach-omap2/cclock2420_data.c b/arch/arm/mach-omap2/cclock2420_data.c index 7e5febe456d..ab7e952d207 100644 --- a/arch/arm/mach-omap2/cclock2420_data.c +++ b/arch/arm/mach-omap2/cclock2420_data.c @@ -1935,6 +1935,8 @@ int __init omap2420_clk_init(void)  			omap2_init_clk_hw_omap_clocks(c->lk.clk);  	} +	omap2xxx_clkt_vps_late_init(); +  	omap2_clk_disable_autoidle_all();  	omap2_clk_enable_init_clocks(enable_init_clks, diff --git a/arch/arm/mach-omap2/cclock2430_data.c b/arch/arm/mach-omap2/cclock2430_data.c index eda079b96c6..eb3dab68d53 100644 --- a/arch/arm/mach-omap2/cclock2430_data.c +++ b/arch/arm/mach-omap2/cclock2430_data.c @@ -2050,6 +2050,8 @@ int __init omap2430_clk_init(void)  			omap2_init_clk_hw_omap_clocks(c->lk.clk);  	} +	omap2xxx_clkt_vps_late_init(); +  	omap2_clk_disable_autoidle_all();  	omap2_clk_enable_init_clocks(enable_init_clks, diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 5789a5e2556..a2cc046b47f 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -2026,14 +2026,13 @@ int __init omap4xxx_clk_init(void)  	 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power  	 * state when turning the ABE clock domain. Workaround this by  	 * locking the ABE DPLL on boot. +	 * Lock the ABE DPLL in any case to avoid issues with audio.  	 
*/ -	if (cpu_is_omap446x()) { -		rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); -		if (!rc) -			rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); -		if (rc) -			pr_err("%s: failed to configure ABE DPLL!\n", __func__); -	} +	rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); +	if (!rc) +		rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); +	if (rc) +		pr_err("%s: failed to configure ABE DPLL!\n", __func__);  	return 0;  } diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 5e304d0719a..626f3ea3142 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -639,7 +639,7 @@ static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)  	return cnt;  } -static void omap_init_ocp2scp(void) +static void __init omap_init_ocp2scp(void)  {  	struct omap_hwmod	*oh;  	struct platform_device	*pdev; diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c index 4c7566c7e24..2a2cfa88ddb 100644 --- a/arch/arm/mach-omap2/drm.c +++ b/arch/arm/mach-omap2/drm.c @@ -25,6 +25,7 @@  #include <linux/dma-mapping.h>  #include <linux/platform_data/omap_drm.h> +#include "soc.h"  #include "omap_device.h"  #include "omap_hwmod.h" @@ -56,7 +57,7 @@ static int __init omap_init_drm(void)  			oh->name);  	} -	platform_data.omaprev = GET_OMAP_REVISION(); +	platform_data.omaprev = GET_OMAP_TYPE;  	return platform_device_register(&omap_drm_device); diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 129d5081ed1..793f54ac7d1 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2132,8 +2132,12 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {  	 * currently reset very early during boot, before I2C is  	 * available, so it doesn't seem that we have any choice in  	 * the kernel other than to avoid resetting it. +	 * +	 * Also, McPDM needs to be configured to NO_IDLE mode when it +	 * is in used otherwise vital clocks will be gated which +	 * results 'slow motion' audio playback.  	 
*/ -	.flags		= HWMOD_EXT_OPT_MAIN_CLK, +	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,  	.mpu_irqs	= omap44xx_mcpdm_irqs,  	.sdma_reqs	= omap44xx_mcpdm_sdma_reqs,  	.main_clk	= "mcpdm_fck", diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 691aa674665..b8ad6e632bb 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -165,15 +165,11 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,  	struct device_node *np;  	for_each_matching_node(np, match) { -		if (!of_device_is_available(np)) { -			of_node_put(np); +		if (!of_device_is_available(np))  			continue; -		} -		if (property && !of_get_property(np, property, NULL)) { -			of_node_put(np); +		if (property && !of_get_property(np, property, NULL))  			continue; -		}  		of_add_property(np, &device_disabled);  		return np; diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h index d6b5073692d..44754230fdc 100644 --- a/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/arch/arm/mach-realview/include/mach/irqs-eb.h @@ -115,7 +115,7 @@  /*   * Only define NR_IRQS if less than NR_IRQS_EB   */ -#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96) +#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)  #if defined(CONFIG_MACH_REALVIEW_EB) \  	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c index 553059f5184..755c0bb119f 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c @@ -47,7 +47,7 @@ static struct spi_board_info wm1253_devs[] = {  		.bus_num	= 0,  		.chip_select	= 0,  		.mode		= SPI_MODE_0, -		.irq		= S3C_EINT(5), +		.irq		= S3C_EINT(4),  		.controller_data = &wm0010_spi_csinfo,  		.platform_data = &wm0010_pdata,  	}, diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index 7feb426fc20..d2e1a16690b 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c @@ -338,8 +338,10 @@ int __init s3c64xx_pm_init(void)  	for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)  		pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); +#ifdef CONFIG_S3C_DEV_FB  	if (dev_get_platdata(&s3c_device_fb.dev))  		pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); +#endif  	return 0;  } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6b2fb87c869..dda3904dc64 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,  	if (is_coherent || nommu())  		addr = __alloc_simple_buffer(dev, size, gfp, &page); -	else if (gfp & GFP_ATOMIC) +	else if (!(gfp & __GFP_WAIT))  		addr = __alloc_from_pool(size, &page);  	else if (!IS_ENABLED(CONFIG_CMA))  		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); @@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,  	size_t size, enum dma_data_direction dir,  	void (*op)(const void *, size_t, int))  { +	unsigned long pfn; +	size_t left = size; + +	pfn = page_to_pfn(page) + offset / PAGE_SIZE; +	offset %= PAGE_SIZE; +  	/*  	 * A single sg entry may refer to multiple physically contiguous  	 * pages.  But we still need to process highmem pages individually.  	 * If highmem is not configured then the bulk of this loop gets  	 * optimized out.  	 
*/ -	size_t left = size;  	do {  		size_t len = left;  		void *vaddr; +		page = pfn_to_page(pfn); +  		if (PageHighMem(page)) { -			if (len + offset > PAGE_SIZE) { -				if (offset >= PAGE_SIZE) { -					page += offset / PAGE_SIZE; -					offset %= PAGE_SIZE; -				} +			if (len + offset > PAGE_SIZE)  				len = PAGE_SIZE - offset; -			}  			vaddr = kmap_high_get(page);  			if (vaddr) {  				vaddr += offset; @@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,  			op(vaddr, len, dir);  		}  		offset = 0; -		page++; +		pfn++;  		left -= len;  	} while (left);  } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9f0610243bd..ce328c7f5c9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {  	},  	[MT_MEMORY_SO] = {  		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | -				L_PTE_MT_UNCACHED, +				L_PTE_MT_UNCACHED | L_PTE_XN,  		.prot_l1   = PMD_TYPE_TABLE,  		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |  				PMD_SECT_UNCACHED | PMD_SECT_XN, diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index dd703ef09b8..b178d44e9ea 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S @@ -20,7 +20,7 @@   */  ENTRY(versatile_secondary_startup)  	mrc	p15, 0, r0, c0, c0, 5 -	and	r0, r0, #15 +	bic	r0, #0xff000000  	adr	r4, 1f  	ldmia	r4, {r5, r6}  	sub	r4, r4, r5 diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index cc926c98598..323ce1a62bb 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -22,7 +22,7 @@  @  IRQs disabled.  @  ENTRY(do_vfp) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT  	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count  	add	r11, r4, #1		@ increment it  	str	r11, [r10, #TI_PREEMPT] @@ -35,7 +35,7 @@ ENTRY(do_vfp)  ENDPROC(do_vfp)  ENTRY(vfp_null_entry) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT  	get_thread_info	r10  	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count  	sub	r11, r4, #1		@ decrement it @@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)  	__INIT  ENTRY(vfp_testing_entry) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT  	get_thread_info	r10  	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count  	sub	r11, r4, #1		@ decrement it diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index ea0349f6358..dd5e56f95f3 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -168,7 +168,7 @@ vfp_hw_state_valid:  					@ else it's one 32-bit instruction, so  					@ always subtract 4 from the following  					@ instruction address. 
-#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT  	get_thread_info	r10  	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count  	sub	r11, r4, #1		@ decrement it @@ -192,7 +192,7 @@ look_for_VFP_exceptions:  	@ not recognised by VFP  	DBGSTR	"not VFP" -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT  	get_thread_info	r10  	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count  	sub	r11, r4, #1		@ decrement it diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 07fea290d7c..fe32c0e4ac0 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -26,7 +26,10 @@  typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) +#define ELF_CORE_COPY_REGS(dest, regs)	\ +	*(struct user_pt_regs *)&(dest) = (regs)->user_regs; +  typedef elf_greg_t elf_gregset_t[ELF_NGREG];  typedef struct user_fpsimd_state elf_fpregset_t; diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index aaf5199d8fc..b3d18f9f3e8 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)  #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif /* __ASM_AVR32_DMA_MAPPING_H */ diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index bbf461076a0..054d9ec57d9 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	_dma_sync((dma_addr_t)vaddr, size, dir);  } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif				/* _BLACKFIN_DMA_MAPPING_H */ diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 3c694065030..88bd0d899bd 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))  #define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h)) +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, 
dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif	/* _ASM_C6X_DMA_MAPPING_H */ diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 8588b2ccf85..2f0f654f1b4 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  {  } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index dfb811002c6..1746a2b8e6e 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	flush_write_buffers();  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif  /* _ASM_DMA_MAPPING_H */ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 4265ff64219..b7a5fffe092 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)  	read_unlock(&tasklist_lock);  } -static inline int -thread_matches (struct task_struct *thread, unsigned long addr) -{ -	unsigned long thread_rbs_end; -	struct pt_regs *thread_regs; - -	if (ptrace_check_attach(thread, 0) < 0) -		/* -		 * If the thread is not in an attachable state, we'll -		 * ignore it.  The net effect is that if ADDR happens -		 * to overlap with the portion of the thread's -		 * register backing store that is currently residing -		 * on the thread's kernel stack, then ptrace() may end -		 * up accessing a stale value.  But if the thread -		 * isn't stopped, that's a problem anyhow, so we're -		 * doing as well as we can... -		 */ -		return 0; - -	thread_regs = task_pt_regs(thread); -	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); -	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) -		return 0; - -	return 1;	/* looks like we've got a winner */ -} -  /*   * Write f32-f127 back to task->thread.fph if it has been modified.   
*/ diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 17f7a45948e..292805f0762 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -21,6 +21,22 @@ extern void *dma_alloc_coherent(struct device *, size_t,  extern void dma_free_coherent(struct device *, size_t,  			      void *, dma_addr_t); +static inline void *dma_alloc_attrs(struct device *dev, size_t size, +				    dma_addr_t *dma_handle, gfp_t flag, +				    struct dma_attrs *attrs) +{ +	/* attrs is not supported and ignored */ +	return dma_alloc_coherent(dev, size, dma_handle, flag); +} + +static inline void dma_free_attrs(struct device *dev, size_t size, +				  void *cpu_addr, dma_addr_t dma_handle, +				  struct dma_attrs *attrs) +{ +	/* attrs is not supported and ignored */ +	dma_free_coherent(dev, size, cpu_addr, dma_handle); +} +  static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,  					  dma_addr_t *handle, gfp_t flag)  { @@ -99,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)  #include <asm-generic/dma-mapping-broken.h>  #endif +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, +			   void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) +  #endif  /* _M68K_DMA_MAPPING_H */ diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h index bf86b29fe64..037028f4ab7 100644 --- a/arch/m68k/include/asm/pgtable_no.h +++ b/arch/m68k/include/asm/pgtable_no.h @@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp);   */  #define	VMALLOC_START	0  #define	VMALLOC_END	0xffffffff +#define	KMAP_START	0 +#define	KMAP_END	0xffffffff  #include <asm-generic/pgtable.h> diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index ae700f49e51..b0768a65792 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h @@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);  #define start_thread(_regs, _pc, _usp)                  \  do {                                                    \  	(_regs)->pc = (_pc);                            \ -	((struct switch_stack *)(_regs))[-1].a6 = 0;    \  	setframeformat(_regs);                          \  	if (current->mm)                                \  		(_regs)->d5 = current->mm->start_data;  \ diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 847994ce680..f9337f61466 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@  #include <uapi/asm/unistd.h> -#define NR_syscalls		348 +#define NR_syscalls		349  #define __ARCH_WANT_OLD_READDIR  #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index b94bfbf9070..625f321001d 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -353,5 +353,6 @@  #define __NR_process_vm_readv	345  #define __NR_process_vm_writev	346  #define __NR_kcmp		347 +#define __NR_finit_module	348  #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 
c30da5b3f2d..3f04ea0ab80 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -368,4 +368,5 @@ ENTRY(sys_call_table)  	.long sys_process_vm_readv	/* 345 */  	.long sys_process_vm_writev  	.long sys_kcmp +	.long sys_finit_module diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index f0e05bce92f..afd8106fd83 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -39,6 +39,11 @@  void *empty_zero_page;  EXPORT_SYMBOL(empty_zero_page); +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) +extern void init_pointer_table(unsigned long ptable); +extern pmd_t *zero_pgtable; +#endif +  #ifdef CONFIG_MMU  pg_data_t pg_data_map[MAX_NUMNODES]; @@ -69,9 +74,6 @@ void __init m68k_setup_node(int node)  	node_set_online(node);  } -extern void init_pointer_table(unsigned long ptable); -extern pmd_t *zero_pgtable; -  #else /* CONFIG_MMU */  /* diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index d7af29f1fcf..ba611927749 100644 --- a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -8,8 +8,10 @@ config BCM47XX_SSB  	select SSB_DRIVER_EXTIF  	select SSB_EMBEDDED  	select SSB_B43_PCI_BRIDGE if PCI +	select SSB_DRIVER_PCICORE if PCI  	select SSB_PCICORE_HOSTMODE if PCI  	select SSB_DRIVER_GPIO +	select GPIOLIB  	default y  	help  	 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. @@ -25,6 +27,7 @@ config BCM47XX_BCMA  	select BCMA_HOST_PCI if PCI  	select BCMA_DRIVER_PCI_HOSTMODE if PCI  	select BCMA_DRIVER_GPIO +	select GPIOLIB  	default y  	help  	 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c index 9f883bf7695..33b72144db3 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -30,6 +30,7 @@   * measurement, and debugging facilities.   */ +#include <linux/compiler.h>  #include <linux/irqflags.h>  #include <asm/octeon/cvmx.h>  #include <asm/octeon/cvmx-l2c.h> @@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)   */  static void fault_in(uint64_t addr, int len)  { -	volatile char *ptr; -	volatile char dummy; +	char *ptr; +  	/*  	 * Adjust addr and length so we get all cache lines even for  	 * small ranges spanning two cache lines.  	 */  	len += addr & CVMX_CACHE_LINE_MASK;  	addr &= ~CVMX_CACHE_LINE_MASK; -	ptr = (volatile char *)cvmx_phys_to_ptr(addr); +	ptr = cvmx_phys_to_ptr(addr);  	/*  	 * Invalidate L1 cache to make sure all loads result in data  	 * being in L2.  	 
*/  	CVMX_DCACHE_INVALIDATE;  	while (len > 0) { -		dummy += *ptr; +		ACCESS_ONCE(*ptr);  		len -= CVMX_CACHE_LINE_SIZE;  		ptr += CVMX_CACHE_LINE_SIZE;  	} diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h index e9bfc0813c7..7bfad0520e2 100644 --- a/arch/mips/include/asm/dsp.h +++ b/arch/mips/include/asm/dsp.h @@ -16,7 +16,7 @@  #include <asm/mipsregs.h>  #define DSP_DEFAULT	0x00000000 -#define DSP_MASK	0x3ff +#define DSP_MASK	0x3f  #define __enable_dsp_hazard()						\  do {									\ diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h index ab84064283d..33c34adbecf 100644 --- a/arch/mips/include/asm/inst.h +++ b/arch/mips/include/asm/inst.h @@ -353,6 +353,7 @@ union mips_instruction {  	struct u_format u_format;  	struct c_format c_format;  	struct r_format r_format; +	struct p_format p_format;  	struct f_format f_format;  	struct ma_format ma_format;  	struct b_format b_format; diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h index edaa06d9d49..e410df4e1b3 100644 --- a/arch/mips/include/asm/mach-pnx833x/war.h +++ b/arch/mips/include/asm/mach-pnx833x/war.h @@ -21,4 +21,4 @@  #define R10000_LLSC_WAR			0  #define MIPS34K_MISSED_ITLB_WAR		0 -#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */ +#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */ diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index c63191055e6..013d5f78126 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)  #else  #define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))  #define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))  #endif  #define __pgd_offset(address)	pgd_index(address) diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index a1a0452ac18..77d4fb33f75 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm  header-y += auxvec.h  header-y += bitsperlong.h +header-y += break.h  header-y += byteorder.h  header-y += cachectl.h  header-y += errno.h diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/uapi/asm/break.h index 9161e684cb4..9161e684cb4 100644 --- a/arch/mips/include/asm/break.h +++ b/arch/mips/include/uapi/asm/break.h diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 6a2d758dd8e..83fa1460e29 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -25,6 +25,12 @@  #define MCOUNT_OFFSET_INSNS 4  #endif +/* Arch override because MIPS doesn't need to run this from stop_machine() */ +void arch_ftrace_update_code(int command) +{ +	ftrace_modify_all_code(command); +} +  /*   * Check if the address is in kernel space   * @@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)  	return 0;  } +#ifndef CONFIG_64BIT +static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, +				unsigned int new_code2) +{ +	int faulted; + +	safe_store_code(new_code1, ip, faulted); +	if (unlikely(faulted)) +		return -EFAULT; +	ip += 4; +	safe_store_code(new_code2, ip, faulted); +	if (unlikely(faulted)) +		return -EFAULT; +	flush_icache_range(ip, ip + 8); /* original ip + 12 */ +	return 0; +} +#endif +  /*   * The details about the calling site of mcount on MIPS   * @@ -131,8 +155,18 @@ int ftrace_make_nop(struct 
module *mod,  	 * needed.  	 */  	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; - +#ifdef CONFIG_64BIT  	return ftrace_modify_code(ip, new); +#else +	/* +	 * On 32 bit MIPS platforms, gcc adds a stack adjust +	 * instruction in the delay slot after the branch to +	 * mcount and expects mcount to restore the sp on return. +	 * This is based on a legacy API and does nothing but +	 * waste instructions so it's being removed at runtime. +	 */ +	return ftrace_modify_code_2(ip, new, INSN_NOP); +#endif  }  int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 4c968e7efb7..16586767335 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,9 +46,8 @@  	PTR_L	a5, PT_R9(sp)  	PTR_L	a6, PT_R10(sp)  	PTR_L	a7, PT_R11(sp) -	PTR_ADDIU	sp, PT_SIZE  #else -	PTR_ADDIU	sp, (PT_SIZE + 8) +	PTR_ADDIU	sp, PT_SIZE  #endif  .endm @@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)  	.globl _mcount  _mcount:  	b	ftrace_stub -	 nop +	addiu sp,sp,8 + +	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */  	lw	t1, function_trace_stop  	bnez	t1, ftrace_stub  	 nop diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index eec690af658..147cec19621 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)  			printk(KERN_WARNING  			       "VPE loader: TC %d is already in use.\n", -                               t->index); +			       v->tc->index);  			return -ENOEXEC;  		}  	} else { diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f36acd1b380..a7935bf0fec 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)  #endif  	/* tell oprofile which irq to use */ -	cp0_perfcount_irq = LTQ_PERF_IRQ; +	cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);  	/*  	 * if the timer irq is not one of the mips irqs we need to diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index dc81ca8dc0d..288f7954988 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -21,7 +21,7 @@ void __delay(unsigned long loops)  	"	.set	noreorder				\n"  	"	.align	3					\n"  	"1:	bnez	%0, 1b					\n" -#if __SIZEOF_LONG__ == 4 +#if BITS_PER_LONG == 32  	"	subu	%0, 1					\n"  #else  	"	dsubu	%0, 1					\n" diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 7657fd21cd3..cacfd31e8ec 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)  EXPORT_SYMBOL(__ioremap);  EXPORT_SYMBOL(__iounmap); - -int __virt_addr_valid(const volatile void *kaddr) -{ -	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); -} -EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d9be7540a6b..7e5fe2790d8 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)  	return ret;  } + +int __virt_addr_valid(const volatile void *kaddr) +{ +	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +} +EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 4e7f49d3d5a..c5ce6992ac4 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -193,8 +193,11 @@ static void nlm_init_node(void)  void __init prom_init(void)  { -	int i, *argv, *envp;		/* passed 
as 32 bit ptrs */ +	int *argv, *envp;		/* passed as 32 bit ptrs */  	struct psb_info *prom_infop; +#ifdef CONFIG_SMP +	int i; +#endif  	/* truncate to 32 bit and sign extend all args */  	argv = (int *)(long)(int)fw_arg1; diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index 1552522b871..6eaa4f2d0e3 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -24,7 +24,7 @@  #include <asm/mach-ath79/pci.h>  #define AR71XX_PCI_MEM_BASE	0x10000000 -#define AR71XX_PCI_MEM_SIZE	0x08000000 +#define AR71XX_PCI_MEM_SIZE	0x07000000  #define AR71XX_PCI_WIN0_OFFS		0x10000000  #define AR71XX_PCI_WIN1_OFFS		0x11000000 diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 86d77a66645..c11c75be2d7 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -21,7 +21,7 @@  #define AR724X_PCI_CTRL_SIZE	0x100  #define AR724X_PCI_MEM_BASE	0x10000000 -#define AR724X_PCI_MEM_SIZE	0x08000000 +#define AR724X_PCI_MEM_SIZE	0x04000000  #define AR724X_PCI_REG_RESET		0x18  #define AR724X_PCI_REG_INT_STATUS	0x4c diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1e..a18abfc558e 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,  	mn10300_dcache_flush_inv();  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 467bbd510ea..106b395688e 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);  /* At the moment, we panic on error for IOMMU resource exaustion */  #define dma_mapping_error(dev, x)	0 +/* This API cannot be supported on PA-RISC */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index bfb44247d7a..eb7850b46c2 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1865,7 +1865,7 @@ syscall_restore:  	/* Are we being ptraced? */  	ldw	TASK_FLAGS(%r1),%r19 -	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 +	ldi	_TIF_SYSCALL_TRACE_MASK,%r2  	and,COND(=)	%r19,%r2,%r0  	b,n	syscall_restore_rfi @@ -1978,15 +1978,23 @@ syscall_restore_rfi:  	/* sr2 should be set to zero for userspace syscalls */  	STREG	%r0,TASK_PT_SR2(%r1) -pt_regs_ok:  	LDREG	TASK_PT_GR31(%r1),%r2 -	depi	3,31,2,%r2			   /* ensure return to user mode. */ -	STREG	%r2,TASK_PT_IAOQ0(%r1) +	depi	3,31,2,%r2		   /* ensure return to user mode. */ +	STREG   %r2,TASK_PT_IAOQ0(%r1)  	ldo	4(%r2),%r2  	STREG	%r2,TASK_PT_IAOQ1(%r1) +	b	intr_restore  	copy	%r25,%r16 + +pt_regs_ok: +	LDREG	TASK_PT_IAOQ0(%r1),%r2 +	depi	3,31,2,%r2		   /* ensure return to user mode. 
*/ +	STREG	%r2,TASK_PT_IAOQ0(%r1) +	LDREG	TASK_PT_IAOQ1(%r1),%r2 +	depi	3,31,2,%r2 +	STREG	%r2,TASK_PT_IAOQ1(%r1)  	b	intr_restore -	nop +	copy	%r25,%r16  	.import schedule,code  syscall_do_resched: diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index c0b1affc06a..0299d63cd11 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -410,11 +410,13 @@ void __init init_IRQ(void)  {  	local_irq_disable();	/* PARANOID - should already be disabled */  	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */ -	claim_cpu_irqs();  #ifdef CONFIG_SMP -	if (!cpu_eiem) +	if (!cpu_eiem) { +		claim_cpu_irqs();  		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); +	}  #else +	claim_cpu_irqs();  	cpu_eiem = EIEM_MASK(TIMER_IRQ);  #endif          set_eiem(cpu_eiem);	/* EIEM : enable all external intr */ diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 857c2f54547..534abd4936e 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -26,7 +26,7 @@  #include <asm/asm-offsets.h>  /* PSW bits we allow the debugger to modify */ -#define USER_PSW_BITS	(PSW_N | PSW_V | PSW_CB) +#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)  /*   * Called by kernel/ptrace.c when detaching.. diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 53799695599..fd051705a40 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)  	DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",  			(unsigned long)ka, sp, frame_size); +	/* Align alternate stack and reserve 64 bytes for the signal +	   handler's frame marker.  */  	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) -		sp = current->sas_ss_sp; /* Stacks grow up! */ +		sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */  	DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);  	return (void __user *) sp; /* Stacks grow up.  Fun. */ diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 9071e093164..933423fa514 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -347,16 +347,15 @@      Sgl_isinexact_to_fix(sgl_value,exponent)  #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB)	\ -  {Sall(sgl_value) <<= SGL_EXP_LENGTH;  /*  left-justify  */		\ +  {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH;		\      if (exponent <= 31) {						\ -    	Dintp1(dresultA) = 0;						\ -    	Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ +	Dintp1(dresultA) = 0;						\ +	Dintp2(dresultB) = val >> (31 - exponent);			\      }									\      else {								\ -    	Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent);		\ -    	Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31);		\ +	Dintp1(dresultA) = val >> (63 - exponent);			\ +	Dintp2(dresultB) = exponent <= 62 ? 
val << (exponent - 31) : 0;	\      }									\ -    Sall(sgl_value) >>= SGL_EXP_LENGTH;  /* return to original */	\    }  #define Duint_setzero(dresultA,dresultB) 	\ diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d22e73e4618..e514de57a12 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -439,6 +439,8 @@ ret_from_fork:  ret_from_kernel_thread:  	REST_NVGPRS(r1)  	bl	schedule_tail +	li	r3,0 +	stw	r3,0(r1)  	mtlr	r14  	mr	r3,r15  	PPC440EP_ERR42 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index b310a057362..3d990d3bd8b 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -664,6 +664,19 @@ resume_kernel:  	ld	r4,TI_FLAGS(r9)  	andi.	r0,r4,_TIF_NEED_RESCHED  	bne	1b + +	/* +	 * arch_local_irq_restore() from preempt_schedule_irq above may +	 * enable hard interrupt but we really should disable interrupts +	 * when we return from the interrupt, and so that we don't get +	 * interrupted after loading SRR0/1. +	 */ +#ifdef CONFIG_PPC_BOOK3E +	wrteei	0 +#else +	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */ +	mtmsrd	r10,1		  /* Update machine state */ +#endif /* CONFIG_PPC_BOOK3E */  #endif /* CONFIG_PREEMPT */  	.globl	fast_exc_return_irq diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index c470a40b29f..a7bc7521c06 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)  static int kgdb_singlestep(struct pt_regs *regs)  {  	struct thread_info *thread_info, *exception_thread_info; -	struct thread_info *backup_current_thread_info = \ -		(struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); +	struct thread_info *backup_current_thread_info;  	if (user_mode(regs))  		return 0; +	backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);  	/*  	 * On Book E and perhaps other processors, singlestep is handled on  	 * the critical exception stack.  This causes current_thread_info() @@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs)  		/* Restore current_thread_info lastly. 
*/  		memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); +	kfree(backup_current_thread_info);  	return 1;  } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 6f6b1cccc91..127361e093f 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -494,10 +494,15 @@ void timer_interrupt(struct pt_regs * regs)  	set_dec(DECREMENTER_MAX);  	/* Some implementations of hotplug will get timer interrupts while -	 * offline, just ignore these +	 * offline, just ignore these and we also need to set +	 * decrementers_next_tb as MAX to make sure __check_irq_replay +	 * don't replay timer interrupt when return, otherwise we'll trap +	 * here infinitely :(  	 */ -	if (!cpu_online(smp_processor_id())) +	if (!cpu_online(smp_processor_id())) { +		*next_tb = ~(u64)0;  		return; +	}  	/* Conditionally hard-enable interrupts now that the DEC has been  	 * bumped to its maximum value diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index b0855e5d890..9d9cddc5b34 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -39,6 +39,7 @@  #define OP_31_XOP_TRAP      4  #define OP_31_XOP_LWZX      23  #define OP_31_XOP_TRAP_64   68 +#define OP_31_XOP_DCBF      86  #define OP_31_XOP_LBZX      87  #define OP_31_XOP_STWX      151  #define OP_31_XOP_STBX      215 @@ -374,6 +375,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)  			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);  			break; +		case OP_31_XOP_DCBF:  		case OP_31_XOP_DCBI:  			/* Do nothing. The guest is performing dcbi because  			 * hardware DMA is not snooped by the dcache, but diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 56585086413..7443481a315 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	sldi	r29,r5,SID_SHIFT - VPN_SHIFT  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 - -	/* Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */ -	xor	r28,r5,r0 +	/* +	 * Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) +	 */ +	rldicl	r0,r3,64-12,48 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */ @@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	/*  	 * calculate hash value for primary slot and  	 * store it in r28 for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */ +	rldicl	r0,r3,64-12,36 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ @@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	 */  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 - -	/* Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */ -	xor	r28,r5,r0 +	/* +	 * Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) +	 */ +	
rldicl	r0,r3,64-12,48 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */ @@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	/*  	 * Calculate hash value for primary slot and  	 * store it in r28  for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ +	rldicl	r0,r3,64-12,36 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ @@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)  	or	r29,r28,r29 -	/* Calculate hash value for primary slot and store it in r28 */ -	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */ -	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */ -	xor	r28,r5,r0 +	/* Calculate hash value for primary slot and store it in r28 +	 * r3 = va, r5 = vsid +	 * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) +	 */ +	rldicl	r0,r3,64-16,52 +	xor	r28,r5,r0		/* hash */  	b	4f  3:	/* Calc vpn and put it in r29 */  	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT  	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)  	or	r29,r28,r29 -  	/*  	 * calculate hash value for primary slot and  	 * store it in r28 for 1T segment +	 * r3 = va, r5 = vsid  	 */ -	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */ -	clrldi	r5,r5,40		/* vsid & 0xffffff */ -	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */ -	xor	r28,r28,r5 +	sldi	r28,r5,25		/* vsid << 25 */ +	/* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ +	rldicl	r0,r3,64-16,40 +	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */  	xor	r28,r28,r0		/* hash */  	/* Convert linux PTE bits into HW equivalents */ diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 315f9495e9b..f444b94935f 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1)  	for (pmc = 0; pmc < 4; pmc++) {  		psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK  				<< (OPROFILE_MAX_PMC_NUM - pmc) -				* OPROFILE_MAX_PMC_NUM); +				* OPROFILE_PMSEL_FIELD_WIDTH);  		psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)  				 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;  		unit = mmcr1 & (OPROFILE_PM_UNIT_MSK diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index 95d00173029..890f30e70f9 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c @@ -236,6 +236,13 @@ out:  static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)  { +	/* +	 * We don't support CPU hotplug. Don't unmap after the system +	 * has already made it to a running state. 
+	 */ +	if (system_state != SYSTEM_BOOTING) +		return 0; +  	if (sdcasr_mapbase)  		iounmap(sdcasr_mapbase);  	if (sdcpwr_mapbase) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index c1d7930a82f..098adbb6266 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1365,6 +1365,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma,  	__pmd_idte(address, pmdp);  } +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, +				      unsigned long address, pmd_t *pmdp) +{ +	pmd_t pmd = *pmdp; + +	if (pmd_write(pmd)) { +		__pmd_idte(address, pmdp); +		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); +	} +} +  static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)  {  	pmd_t __pmd; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a5f4f5a1d24..0aa98db8a80 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,  	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));  	do_div(nsecs, 125);  	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); +	/* Program the maximum value if we have an overflow (== year 2042) */ +	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) +		S390_lowcore.clock_comparator = -1ULL;  	set_clock_comparator(S390_lowcore.clock_comparator);  	return 0;  } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ed937ae72df..9bff3db17c8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -60,6 +60,7 @@ config SPARC64  	select HAVE_MEMBLOCK  	select HAVE_MEMBLOCK_NODE_MAP  	select HAVE_SYSCALL_WRAPPERS +	select HAVE_ARCH_TRANSPARENT_HUGEPAGE  	select HAVE_DYNAMIC_FTRACE  	select HAVE_FTRACE_MCOUNT_RECORD  	select HAVE_SYSCALL_TRACEPOINTS diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7870be0f5ad..08fcce90316 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -71,7 +71,6 @@  #define PMD_PADDR	_AC(0xfffffffe,UL)  #define PMD_PADDR_SHIFT	_AC(11,UL) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE  #define PMD_ISHUGE	_AC(0x00000001,UL)  /* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge @@ -86,7 +85,6 @@  #define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)  #define PMD_HUGE_EXEC		_AC(0x00000040,UL)  #define PMD_HUGE_SPLITTING	_AC(0x00000020,UL) -#endif  /* PGDs point to PMD tables which are 8K aligned.  
*/  #define PGD_PADDR	_AC(0xfffffffc,UL) @@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte)  	return pte_val(pte) & _PAGE_SPECIAL;  } +static inline int pmd_large(pmd_t pmd) +{ +	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == +		(PMD_ISHUGE | PMD_HUGE_PRESENT); +} +  #ifdef CONFIG_TRANSPARENT_HUGEPAGE  static inline int pmd_young(pmd_t pmd)  { @@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)  	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);  } -static inline int pmd_large(pmd_t pmd) -{ -	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == -		(PMD_ISHUGE | PMD_HUGE_PRESENT); -} -  static inline int pmd_trans_splitting(pmd_t pmd)  {  	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 1271b3a27d4..be5bdf93c76 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op)  	regs = pr->phys_addr;  	iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); -	if (!iommu) -		goto fatal_memory_error;  	strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); -	if (!strbuf) +	if (!iommu || !strbuf)  		goto fatal_memory_error;  	op->dev.archdata.iommu = iommu; @@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op)  	return;  fatal_memory_error: +	kfree(iommu); +	kfree(strbuf);  	prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");  } diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 42c55df3aec..01ee23dd724 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,  	return 1;  } +static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, +			unsigned long end, int write, struct page **pages, +			int *nr) +{ +	struct page *head, *page, *tail; +	u32 mask; +	int refs; + +	mask = PMD_HUGE_PRESENT; +	if (write) +		mask |= PMD_HUGE_WRITE; +	if ((pmd_val(pmd) & mask) != mask) +		return 0; + +	refs = 0; +	head = pmd_page(pmd); +	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); +	tail = page; +	do { +		VM_BUG_ON(compound_head(page) != head); +		pages[*nr] = page; +		(*nr)++; +		page++; +		refs++; +	} while (addr += PAGE_SIZE, addr != end); + +	if (!page_cache_add_speculative(head, refs)) { +		*nr -= refs; +		return 0; +	} + +	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { +		*nr -= refs; +		while (refs--) +			put_page(head); +		return 0; +	} + +	/* Any tail page need their mapcount reference taken before we +	 * return. 
+	 */ +	while (refs--) { +		if (PageTail(tail)) +			get_huge_page_tail(tail); +		tail++; +	} + +	return 1; +} +  static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,  		int write, struct page **pages, int *nr)  { @@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,  		pmd_t pmd = *pmdp;  		next = pmd_addr_end(addr, end); -		if (pmd_none(pmd)) +		if (pmd_none(pmd) || pmd_trans_splitting(pmd))  			return 0; -		if (!gup_pte_range(pmd, addr, next, write, pages, nr)) +		if (unlikely(pmd_large(pmd))) { +			if (!gup_huge_pmd(pmdp, pmd, addr, next, +					  write, pages, nr)) +				return 0; +		} else if (!gup_pte_range(pmd, addr, next, write, +					  pages, nr))  			return 0;  	} while (pmdp++, addr = next, addr != end); diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 875d008828b..1bb7ad4aeff 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -140,6 +140,8 @@ config ARCH_DEFCONFIG  source "init/Kconfig" +source "kernel/Kconfig.freezer" +  menu "Tilera-specific configuration"  config NR_CPUS diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 2a9b293fece..31672918064 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)  #define iowrite32 writel  #define iowrite64 writeq -static inline void memset_io(void *dst, int val, size_t len) +#if CHIP_HAS_MMIO() || defined(CONFIG_PCI) + +static inline void memset_io(volatile void *dst, int val, size_t len)  {  	int x;  	BUG_ON((unsigned long)dst & 0x3); @@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,  		writel(*(u32 *)(src + x), dst + x);  } +#endif +  /*   * The Tile architecture does not support IOPORT, even with PCI.   * Unfortunately we can't yet simply not declare these methods, diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index b4e96fef2cf..241c0bb60b1 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -18,32 +18,20 @@  #include <arch/interrupts.h>  #include <arch/chip.h> -#if !defined(__tilegx__) && defined(__ASSEMBLY__) -  /*   * The set of interrupts we want to allow when interrupts are nominally   * disabled.  The remainder are effectively "NMI" interrupts from   * the point of view of the generic Linux code.  Note that synchronous   * interrupts (aka "non-queued") are not blocked by the mask in any case.   */ -#if CHIP_HAS_AUX_PERF_COUNTERS() -#define LINUX_MASKABLE_INTERRUPTS_HI \ -	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) -#else -#define LINUX_MASKABLE_INTERRUPTS_HI \ -	(~(INT_MASK_HI(INT_PERF_COUNT))) -#endif - -#else - -#if CHIP_HAS_AUX_PERF_COUNTERS() -#define LINUX_MASKABLE_INTERRUPTS \ -	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) -#else  #define LINUX_MASKABLE_INTERRUPTS \ -	(~(INT_MASK(INT_PERF_COUNT))) -#endif +	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT))) +#if CHIP_HAS_SPLIT_INTR_MASK() +/* The same macro, but for the two 32-bit SPRs separately. */ +#define LINUX_MASKABLE_INTERRUPTS_LO (-1) +#define LINUX_MASKABLE_INTERRUPTS_HI \ +	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))  #endif  #ifndef __ASSEMBLY__ @@ -126,7 +114,7 @@   * to know our current state.   
*/  DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); -#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) +#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)  /* Disable interrupts. */  #define arch_local_irq_disable() \ @@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);  /* Prevent the given interrupt from being enabled next time we enable irqs. */  #define arch_local_irq_mask(interrupt) \ -	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) +	(__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))  /* Prevent the given interrupt from being enabled immediately. */  #define arch_local_irq_mask_now(interrupt) do { \ @@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);  /* Allow the given interrupt to be enabled next time we enable irqs. */  #define arch_local_irq_unmask(interrupt) \ -	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) +	(__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))  /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */  #define arch_local_irq_unmask_now(interrupt) do { \ @@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);  /* Disable interrupts. */  #define IRQ_DISABLE(tmp0, tmp1)					\  	{							\ -	 movei  tmp0, -1;					\ +	 movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;		\  	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)	\  	};							\  	{							\ diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h index 96b5710505b..2efe3f68b2d 100644 --- a/arch/tile/include/uapi/arch/interrupts_32.h +++ b/arch/tile/include/uapi/arch/interrupts_32.h @@ -15,6 +15,7 @@  #ifndef __ARCH_INTERRUPTS_H__  #define __ARCH_INTERRUPTS_H__ +#ifndef __KERNEL__  /** Mask for an interrupt. */  /* Note: must handle breaking interrupts into high and low words manually. 
*/  #define INT_MASK_LO(intno) (1 << (intno)) @@ -23,6 +24,7 @@  #ifndef __ASSEMBLER__  #define INT_MASK(intno) (1ULL << (intno))  #endif +#endif  /** Where a given interrupt executes */ @@ -92,216 +94,216 @@  #ifndef __ASSEMBLER__  #define QUEUED_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_DMATLB_MISS) | \ -    INT_MASK(INT_DMATLB_ACCESS) | \ -    INT_MASK(INT_SNITLB_MISS) | \ -    INT_MASK(INT_SN_NOTIFY) | \ -    INT_MASK(INT_SN_FIREWALL) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_DMA_NOTIFY) | \ -    INT_MASK(INT_IDN_CA) | \ -    INT_MASK(INT_UDN_CA) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DMA_ASID) | \ -    INT_MASK(INT_SNI_ASID) | \ -    INT_MASK(INT_DMA_CPL) | \ -    INT_MASK(INT_SN_CPL) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_DMATLB_MISS) | \ +    (1ULL << INT_DMATLB_ACCESS) | \ +    (1ULL << INT_SNITLB_MISS) | \ +    (1ULL << INT_SN_NOTIFY) | \ +    (1ULL << INT_SN_FIREWALL) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_DMA_NOTIFY) | \ +    (1ULL << INT_IDN_CA) | \ +    (1ULL << INT_UDN_CA) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DMA_ASID) | \ +    (1ULL << INT_SNI_ASID) | \ +    (1ULL << INT_DMA_CPL) | \ +    (1ULL << INT_SN_CPL) | \ +    (1ULL << INT_DOUBLE_FAULT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \      0)  #define NONQUEUED_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_SN_ACCESS) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_IDN_REFILL) | \ -    INT_MASK(INT_UDN_REFILL) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_SN_STATIC_ACCESS) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_SN_ACCESS) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_IDN_REFILL) | \ +    (1ULL << INT_UDN_REFILL) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << 
INT_SN_STATIC_ACCESS) | \      0)  #define CRITICAL_MASKED_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_DMATLB_MISS) | \ -    INT_MASK(INT_DMATLB_ACCESS) | \ -    INT_MASK(INT_SNITLB_MISS) | \ -    INT_MASK(INT_SN_NOTIFY) | \ -    INT_MASK(INT_SN_FIREWALL) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_DMA_NOTIFY) | \ -    INT_MASK(INT_IDN_CA) | \ -    INT_MASK(INT_UDN_CA) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_DMATLB_MISS) | \ +    (1ULL << INT_DMATLB_ACCESS) | \ +    (1ULL << INT_SNITLB_MISS) | \ +    (1ULL << INT_SN_NOTIFY) | \ +    (1ULL << INT_SN_FIREWALL) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_DMA_NOTIFY) | \ +    (1ULL << INT_IDN_CA) | \ +    (1ULL << INT_UDN_CA) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \      0)  #define CRITICAL_UNMASKED_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_SN_ACCESS) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_IDN_REFILL) | \ -    INT_MASK(INT_UDN_REFILL) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DMA_ASID) | \ -    INT_MASK(INT_SNI_ASID) | \ -    INT_MASK(INT_DMA_CPL) | \ -    INT_MASK(INT_SN_CPL) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ -    INT_MASK(INT_SN_STATIC_ACCESS) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_SN_ACCESS) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_IDN_REFILL) | \ +    (1ULL << INT_UDN_REFILL) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DMA_ASID) | \ +    (1ULL << INT_SNI_ASID) | \ +    (1ULL << INT_DMA_CPL) | \ +    (1ULL << INT_SN_CPL) | \ +    (1ULL << INT_DOUBLE_FAULT) | \ +    (1ULL << INT_SN_STATIC_ACCESS) | \      0)  #define MASKABLE_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_IDN_REFILL) | \ -    INT_MASK(INT_UDN_REFILL) | \ -    
INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_DMATLB_MISS) | \ -    INT_MASK(INT_DMATLB_ACCESS) | \ -    INT_MASK(INT_SNITLB_MISS) | \ -    INT_MASK(INT_SN_NOTIFY) | \ -    INT_MASK(INT_SN_FIREWALL) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_DMA_NOTIFY) | \ -    INT_MASK(INT_IDN_CA) | \ -    INT_MASK(INT_UDN_CA) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_IDN_REFILL) | \ +    (1ULL << INT_UDN_REFILL) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_DMATLB_MISS) | \ +    (1ULL << INT_DMATLB_ACCESS) | \ +    (1ULL << INT_SNITLB_MISS) | \ +    (1ULL << INT_SN_NOTIFY) | \ +    (1ULL << INT_SN_FIREWALL) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_DMA_NOTIFY) | \ +    (1ULL << INT_IDN_CA) | \ +    (1ULL << INT_UDN_CA) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \      0)  #define UNMASKABLE_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_SN_ACCESS) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DMA_ASID) | \ -    INT_MASK(INT_SNI_ASID) | \ -    INT_MASK(INT_DMA_CPL) | \ -    INT_MASK(INT_SN_CPL) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ -    INT_MASK(INT_SN_STATIC_ACCESS) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_SN_ACCESS) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DMA_ASID) | \ +    (1ULL << INT_SNI_ASID) | \ +    (1ULL << INT_DMA_CPL) | \ +    (1ULL << INT_SN_CPL) | \ +    (1ULL << INT_DOUBLE_FAULT) | \ +    (1ULL << INT_SN_STATIC_ACCESS) | \      0)  #define SYNC_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_SN_ACCESS) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_IDN_REFILL) | \ -    INT_MASK(INT_UDN_REFILL) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    
INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_SN_STATIC_ACCESS) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_SN_ACCESS) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_IDN_REFILL) | \ +    (1ULL << INT_UDN_REFILL) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << INT_SN_STATIC_ACCESS) | \      0)  #define NON_SYNC_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_DMATLB_MISS) | \ -    INT_MASK(INT_DMATLB_ACCESS) | \ -    INT_MASK(INT_SNITLB_MISS) | \ -    INT_MASK(INT_SN_NOTIFY) | \ -    INT_MASK(INT_SN_FIREWALL) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_DMA_NOTIFY) | \ -    INT_MASK(INT_IDN_CA) | \ -    INT_MASK(INT_UDN_CA) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DMA_ASID) | \ -    INT_MASK(INT_SNI_ASID) | \ -    INT_MASK(INT_DMA_CPL) | \ -    INT_MASK(INT_SN_CPL) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_DMATLB_MISS) | \ +    (1ULL << INT_DMATLB_ACCESS) | \ +    (1ULL << INT_SNITLB_MISS) | \ +    (1ULL << INT_SN_NOTIFY) | \ +    (1ULL << INT_SN_FIREWALL) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_DMA_NOTIFY) | \ +    (1ULL << INT_IDN_CA) | \ +    (1ULL << INT_UDN_CA) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DMA_ASID) | \ +    (1ULL << INT_SNI_ASID) | \ +    (1ULL << INT_DMA_CPL) | \ +    (1ULL << INT_SN_CPL) | \ +    (1ULL << INT_DOUBLE_FAULT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \      0)  #endif /* !__ASSEMBLER__ */  #endif /* !__ARCH_INTERRUPTS_H__ */ diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h index 5bb58b2e4e6..13c9f918234 100644 --- a/arch/tile/include/uapi/arch/interrupts_64.h +++ b/arch/tile/include/uapi/arch/interrupts_64.h @@ -15,6 +15,7 @@  #ifndef __ARCH_INTERRUPTS_H__  #define __ARCH_INTERRUPTS_H__ +#ifndef __KERNEL__  /** Mask for an interrupt. */  #ifdef __ASSEMBLER__  /* Note: must handle breaking interrupts into high and low words manually. 
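Throughout these hunks the kernel-side interrupt masks switch from INT_MASK(x) to an open-coded (1ULL << x), and the INT_MASK macro itself is fenced off by the #ifndef __KERNEL__ added above, leaving it as a userspace-only convenience while the generated mask values stay identical. Below is a minimal user-space sketch of the same idiom; the interrupt numbers in it are made up for illustration and are not the real tile definitions.

/* Minimal sketch of building a 64-bit interrupt mask, mirroring the
 * (1ULL << INT_xxx) idiom used in the hunks above.  The interrupt
 * numbers here are illustrative, not the real tile values. */
#include <stdio.h>

#define INT_MEM_ERROR     0
#define INT_TILE_TIMER   23
#define INT_DOUBLE_FAULT 49	/* > 31: needs a 64-bit shift */

#define EXAMPLE_MASK ( \
	(1ULL << INT_MEM_ERROR) | \
	(1ULL << INT_TILE_TIMER) | \
	(1ULL << INT_DOUBLE_FAULT) | \
	0)

int main(void)
{
	/* 1 << 49 on a 32-bit int would be undefined behaviour; the ULL
	 * suffix keeps the whole shift in 64 bits, as in the patch. */
	printf("mask = %#llx\n", (unsigned long long)EXAMPLE_MASK);
	printf("double fault bit set: %d\n",
	       (int)((EXAMPLE_MASK >> INT_DOUBLE_FAULT) & 1));
	return 0;
}

Run, this prints a mask with bit 49 set, which a plain 32-bit 1 << intno could not produce.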
*/ @@ -22,6 +23,7 @@  #else  #define INT_MASK(intno) (1ULL << (intno))  #endif +#endif  /** Where a given interrupt executes */ @@ -85,192 +87,192 @@  #ifndef __ASSEMBLER__  #define QUEUED_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_AUX_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_IPI_3) | \ -    INT_MASK(INT_IPI_2) | \ -    INT_MASK(INT_IPI_1) | \ -    INT_MASK(INT_IPI_0) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_AUX_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_IPI_3) | \ +    (1ULL << INT_IPI_2) | \ +    (1ULL << INT_IPI_1) | \ +    (1ULL << INT_IPI_0) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DOUBLE_FAULT) | \      0)  #define NONQUEUED_INTERRUPTS ( \ -    INT_MASK(INT_SINGLE_STEP_3) | \ -    INT_MASK(INT_SINGLE_STEP_2) | \ -    INT_MASK(INT_SINGLE_STEP_1) | \ -    INT_MASK(INT_SINGLE_STEP_0) | \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_ILL_TRANS) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ +    (1ULL << INT_SINGLE_STEP_3) | \ +    (1ULL << INT_SINGLE_STEP_2) | \ +    (1ULL << INT_SINGLE_STEP_1) | \ +    (1ULL << INT_SINGLE_STEP_0) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_ILL_TRANS) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \      0)  #define CRITICAL_MASKED_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_SINGLE_STEP_3) | \ -    INT_MASK(INT_SINGLE_STEP_2) | \ -    INT_MASK(INT_SINGLE_STEP_1) | \ -    INT_MASK(INT_SINGLE_STEP_0) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_AUX_TILE_TIMER) | \ 
-    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_IPI_3) | \ -    INT_MASK(INT_IPI_2) | \ -    INT_MASK(INT_IPI_1) | \ -    INT_MASK(INT_IPI_0) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_SINGLE_STEP_3) | \ +    (1ULL << INT_SINGLE_STEP_2) | \ +    (1ULL << INT_SINGLE_STEP_1) | \ +    (1ULL << INT_SINGLE_STEP_0) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_AUX_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_IPI_3) | \ +    (1ULL << INT_IPI_2) | \ +    (1ULL << INT_IPI_1) | \ +    (1ULL << INT_IPI_0) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \      0)  #define CRITICAL_UNMASKED_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_ILL_TRANS) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_ILL_TRANS) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DOUBLE_FAULT) | \      0)  #define MASKABLE_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_SINGLE_STEP_3) | \ -    INT_MASK(INT_SINGLE_STEP_2) | \ -    INT_MASK(INT_SINGLE_STEP_1) | \ -    INT_MASK(INT_SINGLE_STEP_0) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_AUX_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_IPI_3) | \ -    INT_MASK(INT_IPI_2) | \ -    INT_MASK(INT_IPI_1) | \ -    INT_MASK(INT_IPI_0) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_SINGLE_STEP_3) | \ +    (1ULL << INT_SINGLE_STEP_2) | \ +    (1ULL << INT_SINGLE_STEP_1) | \ +   
 (1ULL << INT_SINGLE_STEP_0) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_AUX_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_IPI_3) | \ +    (1ULL << INT_IPI_2) | \ +    (1ULL << INT_IPI_1) | \ +    (1ULL << INT_IPI_0) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \      0)  #define UNMASKABLE_INTERRUPTS ( \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_ILL_TRANS) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_ILL_TRANS) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DOUBLE_FAULT) | \      0)  #define SYNC_INTERRUPTS ( \ -    INT_MASK(INT_SINGLE_STEP_3) | \ -    INT_MASK(INT_SINGLE_STEP_2) | \ -    INT_MASK(INT_SINGLE_STEP_1) | \ -    INT_MASK(INT_SINGLE_STEP_0) | \ -    INT_MASK(INT_IDN_COMPLETE) | \ -    INT_MASK(INT_UDN_COMPLETE) | \ -    INT_MASK(INT_ITLB_MISS) | \ -    INT_MASK(INT_ILL) | \ -    INT_MASK(INT_GPV) | \ -    INT_MASK(INT_IDN_ACCESS) | \ -    INT_MASK(INT_UDN_ACCESS) | \ -    INT_MASK(INT_SWINT_3) | \ -    INT_MASK(INT_SWINT_2) | \ -    INT_MASK(INT_SWINT_1) | \ -    INT_MASK(INT_SWINT_0) | \ -    INT_MASK(INT_ILL_TRANS) | \ -    INT_MASK(INT_UNALIGN_DATA) | \ -    INT_MASK(INT_DTLB_MISS) | \ -    INT_MASK(INT_DTLB_ACCESS) | \ +    (1ULL << INT_SINGLE_STEP_3) | \ +    (1ULL << INT_SINGLE_STEP_2) | \ +    (1ULL << INT_SINGLE_STEP_1) | \ +    (1ULL << INT_SINGLE_STEP_0) | \ +    (1ULL << INT_IDN_COMPLETE) | \ +    (1ULL << INT_UDN_COMPLETE) | \ +    (1ULL << INT_ITLB_MISS) | \ +    (1ULL << INT_ILL) | \ +    (1ULL << INT_GPV) | \ +    (1ULL << INT_IDN_ACCESS) | \ +    (1ULL << INT_UDN_ACCESS) | \ +    (1ULL << INT_SWINT_3) | \ +    (1ULL << INT_SWINT_2) | \ +    (1ULL << INT_SWINT_1) | \ +    (1ULL << INT_SWINT_0) | \ +    (1ULL << INT_ILL_TRANS) | \ +    (1ULL << INT_UNALIGN_DATA) | \ +    (1ULL << INT_DTLB_MISS) | \ +    (1ULL << INT_DTLB_ACCESS) | \      0)  #define NON_SYNC_INTERRUPTS ( \ -    INT_MASK(INT_MEM_ERROR) | \ -    INT_MASK(INT_IDN_FIREWALL) | \ -    INT_MASK(INT_UDN_FIREWALL) | \ -    INT_MASK(INT_TILE_TIMER) | \ -    INT_MASK(INT_AUX_TILE_TIMER) | \ -    INT_MASK(INT_IDN_TIMER) | \ -    INT_MASK(INT_UDN_TIMER) | \ -    INT_MASK(INT_IDN_AVAIL) | \ -    
INT_MASK(INT_UDN_AVAIL) | \ -    INT_MASK(INT_IPI_3) | \ -    INT_MASK(INT_IPI_2) | \ -    INT_MASK(INT_IPI_1) | \ -    INT_MASK(INT_IPI_0) | \ -    INT_MASK(INT_PERF_COUNT) | \ -    INT_MASK(INT_AUX_PERF_COUNT) | \ -    INT_MASK(INT_INTCTRL_3) | \ -    INT_MASK(INT_INTCTRL_2) | \ -    INT_MASK(INT_INTCTRL_1) | \ -    INT_MASK(INT_INTCTRL_0) | \ -    INT_MASK(INT_BOOT_ACCESS) | \ -    INT_MASK(INT_WORLD_ACCESS) | \ -    INT_MASK(INT_I_ASID) | \ -    INT_MASK(INT_D_ASID) | \ -    INT_MASK(INT_DOUBLE_FAULT) | \ +    (1ULL << INT_MEM_ERROR) | \ +    (1ULL << INT_IDN_FIREWALL) | \ +    (1ULL << INT_UDN_FIREWALL) | \ +    (1ULL << INT_TILE_TIMER) | \ +    (1ULL << INT_AUX_TILE_TIMER) | \ +    (1ULL << INT_IDN_TIMER) | \ +    (1ULL << INT_UDN_TIMER) | \ +    (1ULL << INT_IDN_AVAIL) | \ +    (1ULL << INT_UDN_AVAIL) | \ +    (1ULL << INT_IPI_3) | \ +    (1ULL << INT_IPI_2) | \ +    (1ULL << INT_IPI_1) | \ +    (1ULL << INT_IPI_0) | \ +    (1ULL << INT_PERF_COUNT) | \ +    (1ULL << INT_AUX_PERF_COUNT) | \ +    (1ULL << INT_INTCTRL_3) | \ +    (1ULL << INT_INTCTRL_2) | \ +    (1ULL << INT_INTCTRL_1) | \ +    (1ULL << INT_INTCTRL_0) | \ +    (1ULL << INT_BOOT_ACCESS) | \ +    (1ULL << INT_WORLD_ACCESS) | \ +    (1ULL << INT_I_ASID) | \ +    (1ULL << INT_D_ASID) | \ +    (1ULL << INT_DOUBLE_FAULT) | \      0)  #endif /* !__ASSEMBLER__ */  #endif /* !__ARCH_INTERRUPTS_H__ */ diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 54bc9a6678e..4ea08090265 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -1035,7 +1035,9 @@ handle_syscall:  	/* Ensure that the syscall number is within the legal range. */  	{  	 moveli r20, hw2(sys_call_table) +#ifdef CONFIG_COMPAT  	 blbs   r30, .Lcompat_syscall +#endif  	}  	{  	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21 @@ -1093,6 +1095,7 @@ handle_syscall:  	 j      .Lresume_userspace   /* jump into middle of interrupt_return */  	} +#ifdef CONFIG_COMPAT  .Lcompat_syscall:  	/*  	 * Load the base of the compat syscall table in r20, and @@ -1117,6 +1120,7 @@ handle_syscall:  	{ move r15, r4; addxi r4, r4, 0 }  	{ move r16, r5; addxi r5, r5, 0 }  	j .Lload_syscall_pointer +#endif  .Linvalid_syscall:  	/* Report an invalid syscall back to the user program */ diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0e5661e7d00..caf93ae1179 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);  int copy_thread(unsigned long clone_flags, unsigned long sp,  		unsigned long arg, struct task_struct *p)  { -	struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); +	struct pt_regs *childregs = task_pt_regs(p);  	unsigned long ksp;  	unsigned long *callee_regs; diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c index baa3d905fee..d1b5c913ae7 100644 --- a/arch/tile/kernel/reboot.c +++ b/arch/tile/kernel/reboot.c @@ -16,6 +16,7 @@  #include <linux/reboot.h>  #include <linux/smp.h>  #include <linux/pm.h> +#include <linux/export.h>  #include <asm/page.h>  #include <asm/setup.h>  #include <hv/hypervisor.h> @@ -49,3 +50,4 @@ void machine_restart(char *cmd)  /* No interesting distinction to be made here. 
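This reboot.c hunk also exports pm_power_off (the EXPORT_SYMBOL line just below), which allows a modular platform driver to install its own power-off handler. The following is only a hedged sketch of such a module; the example_* names are hypothetical and not part of this patch.

/* Hypothetical module sketch: with pm_power_off exported, a platform
 * driver built as a module can install a board-specific power-off hook. */
#include <linux/module.h>
#include <linux/pm.h>

static void example_power_off(void)
{
	/* board-specific: tell the power controller to cut power */
}

static int __init example_poweroff_init(void)
{
	pm_power_off = example_power_off;
	return 0;
}

static void __exit example_poweroff_exit(void)
{
	if (pm_power_off == example_power_off)
		pm_power_off = NULL;
}

module_init(example_poweroff_init);
module_exit(example_poweroff_exit);
MODULE_LICENSE("GPL");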
*/  void (*pm_power_off)(void) = NULL; +EXPORT_SYMBOL(pm_power_off); diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6a649a4462d..d1e15f7b59c 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -31,6 +31,7 @@  #include <linux/timex.h>  #include <linux/hugetlb.h>  #include <linux/start_kernel.h> +#include <linux/screen_info.h>  #include <asm/setup.h>  #include <asm/sections.h>  #include <asm/cacheflush.h> @@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }  /* Chip information */  char chip_model[64] __write_once; +#ifdef CONFIG_VT +struct screen_info screen_info; +#endif +  struct pglist_data node_data[MAX_NUMNODES] __read_mostly;  EXPORT_SYMBOL(node_data); diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index b2f44c28dda..ed258b8ae32 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)  		       p->pc, p->sp, p->ex1);  		p = NULL;  	} -	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) +	if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)  		return p;  	return NULL;  } @@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)  {  	save_stack_trace_tsk(NULL, trace);  } +EXPORT_SYMBOL_GPL(save_stack_trace);  #endif diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index db4fb89e12d..8f8ad814b13 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c @@ -12,6 +12,7 @@   *   more details.   */ +#include <linux/export.h>  #include <asm/page.h>  #include <asm/cacheflush.h>  #include <arch/icache.h> @@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)  	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);  #endif  } +EXPORT_SYMBOL_GPL(finv_buffer_remote); diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c index fdc403614d1..75947edccb2 100644 --- a/arch/tile/lib/cpumask.c +++ b/arch/tile/lib/cpumask.c @@ -16,6 +16,7 @@  #include <linux/ctype.h>  #include <linux/errno.h>  #include <linux/smp.h> +#include <linux/export.h>  /*   * Allow cropping out bits beyond the end of the array. 
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)  	} while (*bp != '\0' && *bp != '\n');  	return 0;  } +EXPORT_SYMBOL(bitmap_parselist_crop); diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index dd5f0a33fda..4385cb6fa00 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);  EXPORT_SYMBOL(hv_dev_close);  EXPORT_SYMBOL(hv_sysconf);  EXPORT_SYMBOL(hv_confstr); +EXPORT_SYMBOL(hv_get_rtc); +EXPORT_SYMBOL(hv_set_rtc);  /* libgcc.a */  uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 5f7868dcd6d..1ae911939a1 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)  		__set_pte(ptep, pte_set_home(pteval, home));  	}  } +EXPORT_SYMBOL(homecache_change_page_home);  struct page *homecache_alloc_pages(gfp_t gfp_mask,  				   unsigned int order, int home) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5d3d61f40b3..36b05ed0bb3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2137,6 +2137,7 @@ config OLPC_XO1_RTC  config OLPC_XO1_SCI  	bool "OLPC XO-1 SCI extras"  	depends on OLPC && OLPC_XO1_PM +	depends on INPUT=y  	select POWER_SUPPLY  	select GPIO_CS5535  	select MFD_CORE diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index ccce0ed67dd..379814bc41e 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -71,7 +71,7 @@ GCOV_PROFILE := n  $(obj)/bzImage: asflags-y  := $(SVGA_MODE)  quiet_cmd_image = BUILD   $@ -cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@ +cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@  $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE  	$(call if_changed,image) @@ -92,7 +92,7 @@ targets += voffset.h  $(obj)/voffset.h: vmlinux FORCE  	$(call if_changed,voffset) -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' +sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . 
\(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'  quiet_cmd_zoffset = ZOFFSET $@        cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 18e329ca108..f8fa41190c3 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params)  	int i;  	struct setup_data *data; -	data = (struct setup_data *)params->hdr.setup_data; +	data = (struct setup_data *)(unsigned long)params->hdr.setup_data;  	while (data && data->next) -		data = (struct setup_data *)data->next; +		data = (struct setup_data *)(unsigned long)data->next;  	status = efi_call_phys5(sys_table->boottime->locate_handle,  				EFI_LOCATE_BY_PROTOCOL, &pci_proto, @@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params)  		if (!pci)  			continue; +#ifdef CONFIG_X86_64  		status = efi_call_phys4(pci->attributes, pci,  					EfiPciIoAttributeOperationGet, 0,  					&attributes); - +#else +		status = efi_call_phys5(pci->attributes, pci, +					EfiPciIoAttributeOperationGet, 0, 0, +					&attributes); +#endif  		if (status != EFI_SUCCESS)  			continue; -		if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)) -			continue; -  		if (!pci->romimage || !pci->romsize)  			continue; @@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params)  		memcpy(rom->romdata, pci->romimage, pci->romsize);  		if (data) -			data->next = (uint64_t)rom; +			data->next = (unsigned long)rom;  		else -			params->hdr.setup_data = (uint64_t)rom; +			params->hdr.setup_data = (unsigned long)rom;  		data = (struct setup_data *)rom; @@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,  			 * Once we've found a GOP supporting ConOut,  			 * don't bother looking any further.  			 */ +			first_gop = gop;  			if (conout_found)  				break; - -			first_gop = gop;  		}  	} diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index aa4aaf1b238..1e3184f6072 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -35,11 +35,11 @@ ENTRY(startup_32)  #ifdef CONFIG_EFI_STUB  	jmp	preferred_addr -	.balign	0x10  	/*  	 * We don't need the return address, so set up the stack so -	 * efi_main() can find its arugments. +	 * efi_main() can find its arguments.  	 */ +ENTRY(efi_pe_entry)  	add	$0x4, %esp  	call	make_boot_params @@ -50,8 +50,10 @@ ENTRY(startup_32)  	pushl	%eax  	pushl	%esi  	pushl	%ecx +	sub	$0x4, %esp -	.org 0x30,0x90 +ENTRY(efi_stub_entry) +	add	$0x4, %esp  	call	efi_main  	cmpl	$0, %eax  	movl	%eax, %esi diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2c4b171eec3..f5d1aaa0dec 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -201,12 +201,12 @@ ENTRY(startup_64)  	 */  #ifdef CONFIG_EFI_STUB  	/* -	 * The entry point for the PE/COFF executable is 0x210, so only -	 * legacy boot loaders will execute this jmp. +	 * The entry point for the PE/COFF executable is efi_pe_entry, so +	 * only legacy boot loaders will execute this jmp.  	 
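With the fixed .org offsets replaced by the efi_pe_entry and efi_stub_entry symbols, the PE/COFF entry point can no longer be assumed to sit at 0x210; instead tools/build.c (later in this diff) reads the symbol values from zoffset.h and patches the header field at pe_header + 0x28. A stand-alone sketch of that patching step follows; buf, pe_header, text_start and the sample 0x210 value are illustrative stand-ins, only the +0x28 offset is taken from the build.c hunk.

/* Sketch of patching the PE AddressOfEntryPoint from a zoffset.h value,
 * modelled on the update_pecoff_text() hunk later in this diff. */
#include <stdio.h>
#include <string.h>

typedef unsigned int u32;
typedef unsigned char u8;

static void put_unaligned_le32(u32 val, u8 *p)
{
	p[0] = val & 0xff;
	p[1] = (val >> 8) & 0xff;
	p[2] = (val >> 16) & 0xff;
	p[3] = (val >> 24) & 0xff;
}

int main(void)
{
	u8  buf[4096];			/* stand-in for the setup image */
	u32 pe_header = 0x80;		/* illustrative PE header offset */
	u32 text_start = 0x4000;	/* start of .text in the image   */
	u32 efi_pe_entry = 0x210;	/* would come from zoffset.h     */
	u32 val;

	memset(buf, 0, sizeof(buf));

	/* Address of entry point for the PE/COFF executable, at offset
	 * 0x28 into the PE header, as in the build.c hunk. */
	put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);

	val = buf[pe_header + 0x28] |
	      (buf[pe_header + 0x29] << 8) |
	      ((u32)buf[pe_header + 0x2a] << 16) |
	      ((u32)buf[pe_header + 0x2b] << 24);
	printf("entry point field = %#x\n", val);
	return 0;
}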
*/  	jmp	preferred_addr -	.org 0x210 +ENTRY(efi_pe_entry)  	mov	%rcx, %rdi  	mov	%rdx, %rsi  	pushq	%rdi @@ -218,7 +218,7 @@ ENTRY(startup_64)  	popq	%rsi  	popq	%rdi -	.org 0x230,0x90 +ENTRY(efi_stub_entry)  	call	efi_main  	movq	%rax,%rsi  	cmpq	$0,%rax diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8c132a625b9..944ce595f76 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -21,6 +21,7 @@  #include <asm/e820.h>  #include <asm/page_types.h>  #include <asm/setup.h> +#include <asm/bootparam.h>  #include "boot.h"  #include "voffset.h"  #include "zoffset.h" @@ -255,6 +256,9 @@ section_table:  	# header, from the old boot sector.  	.section ".header", "a" +	.globl	sentinel +sentinel:	.byte 0xff, 0xff        /* Used to detect broken loaders */ +  	.globl	hdr  hdr:  setup_sects:	.byte 0			/* Filled in by build.c */ @@ -279,7 +283,7 @@ _start:  	# Part 2 of the header, from the old setup.S  		.ascii	"HdrS"		# header signature -		.word	0x020b		# header version number (>= 0x0105) +		.word	0x020c		# header version number (>= 0x0105)  					# or else old loadlin-1.5 will fail)  		.globl realmode_swtch  realmode_swtch:	.word	0, 0		# default_switch, SETUPSEG @@ -297,13 +301,7 @@ type_of_loader:	.byte	0		# 0 means ancient bootloader, newer  # flags, unused bits must be zero (RFU) bit within loadflags  loadflags: -LOADED_HIGH	= 1			# If set, the kernel is loaded high -CAN_USE_HEAP	= 0x80			# If set, the loader also has set -					# heap_end_ptr to tell how much -					# space behind setup.S can be used for -					# heap purposes. -					# Only the loader knows what is free -		.byte	LOADED_HIGH +		.byte	LOADED_HIGH	# The kernel is to be loaded high  setup_move_size: .word  0x8000		# size to move, when setup is not  					# loaded at 0x90000. We will move setup @@ -369,7 +367,23 @@ relocatable_kernel:    .byte 1  relocatable_kernel:    .byte 0  #endif  min_alignment:		.byte MIN_KERNEL_ALIGN_LG2	# minimum alignment -pad3:			.word 0 + +xloadflags: +#ifdef CONFIG_X86_64 +# define XLF0 XLF_KERNEL_64			/* 64-bit kernel */ +#else +# define XLF0 0 +#endif +#ifdef CONFIG_EFI_STUB +# ifdef CONFIG_X86_64 +#  define XLF23 XLF_EFI_HANDOVER_64		/* 64-bit EFI handover ok */ +# else +#  define XLF23 XLF_EFI_HANDOVER_32		/* 32-bit EFI handover ok */ +# endif +#else +# define XLF23 0 +#endif +			.word XLF0 | XLF23  cmdline_size:   .long   COMMAND_LINE_SIZE-1     #length of the command line,                                                  #added with boot protocol @@ -397,8 +411,13 @@ pref_address:		.quad LOAD_PHYSICAL_ADDR	# preferred load addr  #define INIT_SIZE VO_INIT_SIZE  #endif  init_size:		.long INIT_SIZE		# kernel initialization size -handover_offset:	.long 0x30		# offset to the handover +handover_offset: +#ifdef CONFIG_EFI_STUB +  			.long 0x30		# offset to the handover  						# protocol entry point +#else +			.long 0 +#endif  # End of setup header ##################################################### diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld index 03c0683636b..96a6c756353 100644 --- a/arch/x86/boot/setup.ld +++ b/arch/x86/boot/setup.ld @@ -13,7 +13,7 @@ SECTIONS  	.bstext		: { *(.bstext) }  	.bsdata		: { *(.bsdata) } -	. = 497; +	. 
= 495;  	.header		: { *(.header) }  	.entrytext	: { *(.entrytext) }  	.inittext	: { *(.inittext) } diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 4b8e165ee57..94c54465002 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -52,6 +52,10 @@ int is_big_kernel;  #define PECOFF_RELOC_RESERVE 0x20 +unsigned long efi_stub_entry; +unsigned long efi_pe_entry; +unsigned long startup_64; +  /*----------------------------------------------------------------------*/  static const u32 crctab32[] = { @@ -132,7 +136,7 @@ static void die(const char * str, ...)  static void usage(void)  { -	die("Usage: build setup system [> image]"); +	die("Usage: build setup system [zoffset.h] [> image]");  }  #ifdef CONFIG_EFI_STUB @@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)  	 */  	put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); -#ifdef CONFIG_X86_32  	/* -	 * Address of entry point. -	 * -	 * The EFI stub entry point is +16 bytes from the start of -	 * the .text section. +	 * Address of entry point for PE/COFF executable  	 */ -	put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); -#else -	/* -	 * Address of entry point. startup_32 is at the beginning and -	 * the 64-bit entry point (startup_64) is always 512 bytes -	 * after. The EFI stub entry point is 16 bytes after that, as -	 * the first instruction allows legacy loaders to jump over -	 * the EFI stub initialisation -	 */ -	put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); -#endif /* CONFIG_X86_32 */ +	put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);  	update_pecoff_section_header(".text", text_start, text_sz);  }  #endif /* CONFIG_EFI_STUB */ + +/* + * Parse zoffset.h and find the entry points. We could just #include zoffset.h + * but that would mean tools/build would have to be rebuilt every time. It's + * not as if parsing it is hard... 
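For reference, zoffset.h as generated by the sed-zoffset rule earlier in this diff consists of lines of the form "#define ZO_<symbol> 0x<hex address>". The stand-alone sketch below exercises the same PARSE_ZOFS parsing approach that this hunk introduces; the sample addresses are made up and simply mirror the 64-bit defaults used elsewhere in build.c.

/* Stand-alone illustration of the zoffset.h parsing introduced below:
 * each line looks like "#define ZO_<sym> 0x<hex>".  The sample values
 * are for the example only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long efi_pe_entry;
static unsigned long efi_stub_entry;

#define PARSE_ZOFS(p, sym) do {						\
	if (!strncmp(p, "#define ZO_" #sym " ", 11 + sizeof(#sym)))	\
		sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);		\
} while (0)

int main(void)
{
	static char sample[] =
		"#define ZO_efi_pe_entry 0x210\n"
		"#define ZO_efi_stub_entry 0x230\n";
	char *p = sample;

	while (p && *p) {
		PARSE_ZOFS(p, efi_pe_entry);
		PARSE_ZOFS(p, efi_stub_entry);
		p = strchr(p, '\n');
		while (p && (*p == '\r' || *p == '\n'))
			p++;
	}

	printf("efi_pe_entry   = %#lx\n", efi_pe_entry);
	printf("efi_stub_entry = %#lx\n", efi_stub_entry);
	return 0;
}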
+ */ +#define PARSE_ZOFS(p, sym) do { \ +	if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym)))	\ +		sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);		\ +} while (0) + +static void parse_zoffset(char *fname) +{ +	FILE *file; +	char *p; +	int c; + +	file = fopen(fname, "r"); +	if (!file) +		die("Unable to open `%s': %m", fname); +	c = fread(buf, 1, sizeof(buf) - 1, file); +	if (ferror(file)) +		die("read-error on `zoffset.h'"); +	buf[c] = 0; + +	p = (char *)buf; + +	while (p && *p) { +		PARSE_ZOFS(p, efi_stub_entry); +		PARSE_ZOFS(p, efi_pe_entry); +		PARSE_ZOFS(p, startup_64); + +		p = strchr(p, '\n'); +		while (p && (*p == '\r' || *p == '\n')) +			p++; +	} +} +  int main(int argc, char ** argv)  {  	unsigned int i, sz, setup_sectors; @@ -241,7 +269,19 @@ int main(int argc, char ** argv)  	void *kernel;  	u32 crc = 0xffffffffUL; -	if (argc != 3) +	/* Defaults for old kernel */ +#ifdef CONFIG_X86_32 +	efi_pe_entry = 0x10; +	efi_stub_entry = 0x30; +#else +	efi_pe_entry = 0x210; +	efi_stub_entry = 0x230; +	startup_64 = 0x200; +#endif + +	if (argc == 4) +		parse_zoffset(argv[3]); +	else if (argc != 3)  		usage();  	/* Copy the setup code */ @@ -299,6 +339,11 @@ int main(int argc, char ** argv)  #ifdef CONFIG_EFI_STUB  	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); + +#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */ +	efi_stub_entry -= 0x200; +#endif +	put_unaligned_le32(efi_stub_entry, &buf[0x264]);  #endif  	crc = partial_crc32(buf, i, crc); diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 102ff7cb3e4..142c4ceff11 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -207,7 +207,7 @@ sysexit_from_sys_call:  	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)  	jnz ia32_ret_from_sys_call  	TRACE_IRQS_ON -	sti +	ENABLE_INTERRUPTS(CLBR_NONE)  	movl %eax,%esi		/* second arg, syscall return value */  	cmpl $-MAX_ERRNO,%eax	/* is it an error ? 
*/  	jbe 1f @@ -217,7 +217,7 @@ sysexit_from_sys_call:  	call __audit_syscall_exit  	movq RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */  	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi -	cli +	DISABLE_INTERRUPTS(CLBR_NONE)  	TRACE_IRQS_OFF  	testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)  	jz \exit diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 6e8fdf5ad11..28677c55113 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,  #endif /* CONFIG_X86_32 */  extern int add_efi_memmap; +extern unsigned long x86_efi_facility;  extern void efi_set_executable(efi_memory_desc_t *md, bool executable);  extern int efi_memblock_x86_reserve_range(void);  extern void efi_call_phys_prelog(void); diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index ecdfee60ee4..f4076af1f4e 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -3,6 +3,90 @@  #include <uapi/asm/mce.h> +/* + * Machine Check support for x86 + */ + +/* MCG_CAP register defines */ +#define MCG_BANKCNT_MASK	0xff         /* Number of Banks */ +#define MCG_CTL_P		(1ULL<<8)    /* MCG_CTL register available */ +#define MCG_EXT_P		(1ULL<<9)    /* Extended registers available */ +#define MCG_CMCI_P		(1ULL<<10)   /* CMCI supported */ +#define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */ +#define MCG_EXT_CNT_SHIFT	16 +#define MCG_EXT_CNT(c)		(((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) +#define MCG_SER_P		(1ULL<<24)   /* MCA recovery/new status bits */ + +/* MCG_STATUS register defines */ +#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */ +#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */ +#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */ + +/* MCi_STATUS register defines */ +#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */ +#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */ +#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */ +#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */ +#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */ +#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */ +#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */ +#define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */ +#define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */ +#define MCACOD		  0xffff     /* MCA Error Code */ + +/* Architecturally defined codes from SDM Vol. 
3B Chapter 15 */ +#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */ +#define MCACOD_SCRUBMSK	0xfff0 +#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */ +#define MCACOD_DATA	0x0134	/* Data Load */ +#define MCACOD_INSTR	0x0150	/* Instruction Fetch */ + +/* MCi_MISC register defines */ +#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f) +#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7) +#define  MCI_MISC_ADDR_SEGOFF	0	/* segment offset */ +#define  MCI_MISC_ADDR_LINEAR	1	/* linear address */ +#define  MCI_MISC_ADDR_PHYS	2	/* physical address */ +#define  MCI_MISC_ADDR_MEM	3	/* memory address */ +#define  MCI_MISC_ADDR_GENERIC	7	/* generic */ + +/* CTL2 register defines */ +#define MCI_CTL2_CMCI_EN		(1ULL << 30) +#define MCI_CTL2_CMCI_THRESHOLD_MASK	0x7fffULL + +#define MCJ_CTX_MASK		3 +#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK) +#define MCJ_CTX_RANDOM		0    /* inject context: random */ +#define MCJ_CTX_PROCESS		0x1  /* inject context: process */ +#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */ +#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */ +#define MCJ_EXCEPTION		0x8  /* raise as exception */ +#define MCJ_IRQ_BRAODCAST	0x10 /* do IRQ broadcasting */ + +#define MCE_OVERFLOW 0		/* bit 0 in flags means overflow */ + +/* Software defined banks */ +#define MCE_EXTENDED_BANK	128 +#define MCE_THERMAL_BANK	(MCE_EXTENDED_BANK + 0) +#define K8_MCE_THRESHOLD_BASE   (MCE_EXTENDED_BANK + 1) + +#define MCE_LOG_LEN 32 +#define MCE_LOG_SIGNATURE	"MACHINECHECK" + +/* + * This structure contains all data related to the MCE log.  Also + * carries a signature to make it easier to find from external + * debugging tools.  Each entry is only valid when its finished flag + * is set. + */ +struct mce_log { +	char signature[12]; /* "MACHINECHECK" */ +	unsigned len;	    /* = MCE_LOG_LEN */ +	unsigned next; +	unsigned flags; +	unsigned recordlen;	/* length of struct mce */ +	struct mce entry[MCE_LOG_LEN]; +};  struct mca_config {  	bool dont_log_ce; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5199db2923d..1c1a955e67c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)  	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;  } +static inline unsigned long pud_pfn(pud_t pud) +{ +	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; +} +  #define pte_page(pte)	pfn_to_page(pte_pfn(pte))  static inline int pmd_large(pmd_t pte) diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index b47c2a82ff1..062921ef34e 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -16,7 +16,7 @@ extern void uv_system_init(void);  extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,  						 struct mm_struct *mm,  						 unsigned long start, -						 unsigned end, +						 unsigned long end,  						 unsigned int cpu);  #else	/* X86_UV */ diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 92862cd9020..c15ddaf9071 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -1,6 +1,31 @@  #ifndef _ASM_X86_BOOTPARAM_H  #define _ASM_X86_BOOTPARAM_H +/* setup_data types */ +#define SETUP_NONE			0 +#define SETUP_E820_EXT			1 +#define SETUP_DTB			2 +#define SETUP_PCI			3 + +/* ram_size flags */ +#define RAMDISK_IMAGE_START_MASK	0x07FF +#define RAMDISK_PROMPT_FLAG		0x8000 +#define RAMDISK_LOAD_FLAG		0x4000 + +/* loadflags */ +#define LOADED_HIGH	
(1<<0) +#define QUIET_FLAG	(1<<5) +#define KEEP_SEGMENTS	(1<<6) +#define CAN_USE_HEAP	(1<<7) + +/* xloadflags */ +#define XLF_KERNEL_64			(1<<0) +#define XLF_CAN_BE_LOADED_ABOVE_4G	(1<<1) +#define XLF_EFI_HANDOVER_32		(1<<2) +#define XLF_EFI_HANDOVER_64		(1<<3) + +#ifndef __ASSEMBLY__ +  #include <linux/types.h>  #include <linux/screen_info.h>  #include <linux/apm_bios.h> @@ -9,12 +34,6 @@  #include <asm/ist.h>  #include <video/edid.h> -/* setup data types */ -#define SETUP_NONE			0 -#define SETUP_E820_EXT			1 -#define SETUP_DTB			2 -#define SETUP_PCI			3 -  /* extensible setup data list node */  struct setup_data {  	__u64 next; @@ -28,9 +47,6 @@ struct setup_header {  	__u16	root_flags;  	__u32	syssize;  	__u16	ram_size; -#define RAMDISK_IMAGE_START_MASK	0x07FF -#define RAMDISK_PROMPT_FLAG		0x8000 -#define RAMDISK_LOAD_FLAG		0x4000  	__u16	vid_mode;  	__u16	root_dev;  	__u16	boot_flag; @@ -42,10 +58,6 @@ struct setup_header {  	__u16	kernel_version;  	__u8	type_of_loader;  	__u8	loadflags; -#define LOADED_HIGH	(1<<0) -#define QUIET_FLAG	(1<<5) -#define KEEP_SEGMENTS	(1<<6) -#define CAN_USE_HEAP	(1<<7)  	__u16	setup_move_size;  	__u32	code32_start;  	__u32	ramdisk_image; @@ -58,7 +70,8 @@ struct setup_header {  	__u32	initrd_addr_max;  	__u32	kernel_alignment;  	__u8	relocatable_kernel; -	__u8	_pad2[3]; +	__u8	min_alignment; +	__u16	xloadflags;  	__u32	cmdline_size;  	__u32	hardware_subarch;  	__u64	hardware_subarch_data; @@ -106,7 +119,10 @@ struct boot_params {  	__u8  hd1_info[16];	/* obsolete! */		/* 0x090 */  	struct sys_desc_table sys_desc_table;		/* 0x0a0 */  	struct olpc_ofw_header olpc_ofw_header;		/* 0x0b0 */ -	__u8  _pad4[128];				/* 0x0c0 */ +	__u32 ext_ramdisk_image;			/* 0x0c0 */ +	__u32 ext_ramdisk_size;				/* 0x0c4 */ +	__u32 ext_cmd_line_ptr;				/* 0x0c8 */ +	__u8  _pad4[116];				/* 0x0cc */  	struct edid_info edid_info;			/* 0x140 */  	struct efi_info efi_info;			/* 0x1c0 */  	__u32 alt_mem_k;				/* 0x1e0 */ @@ -115,7 +131,20 @@ struct boot_params {  	__u8  eddbuf_entries;				/* 0x1e9 */  	__u8  edd_mbr_sig_buf_entries;			/* 0x1ea */  	__u8  kbd_status;				/* 0x1eb */ -	__u8  _pad6[5];					/* 0x1ec */ +	__u8  _pad5[3];					/* 0x1ec */ +	/* +	 * The sentinel is set to a nonzero value (0xff) in header.S. +	 * +	 * A bootloader is supposed to only take setup_header and put +	 * it into a clean boot_params buffer. If it turns out that +	 * it is clumsy or too generous with the buffer, it most +	 * probably will pick up the sentinel variable too. The fact +	 * that this variable then is still 0xff will let kernel +	 * know that some variables in boot_params are invalid and +	 * kernel should zero out certain portions of boot_params. 
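A simplified sketch of the check this sentinel enables on the kernel side follows: if a boot loader copied a stale or oversized setup image, sentinel is still 0xff and the loader-owned extension fields are treated as uninitialized. The struct and the exact fields cleared are an illustration only; this hunk defines the sentinel and the ext_* fields but not the clearing code itself, which may cover more of boot_params.

/* Simplified sketch of a sentinel check; the real kernel helper may
 * clear more (or different) fields than shown here. */
struct boot_params_example {
	unsigned int  ext_ramdisk_image;	/* boot_params offset 0x0c0 */
	unsigned int  ext_ramdisk_size;		/* boot_params offset 0x0c4 */
	unsigned int  ext_cmd_line_ptr;		/* boot_params offset 0x0c8 */
	unsigned char sentinel;			/* 0x1ef, 0xff in header.S  */
};

static void sanitize_example(struct boot_params_example *bp)
{
	if (bp->sentinel) {
		/* Loader copied more than setup_header, so the extension
		 * fields were never initialized: treat them as garbage. */
		bp->ext_ramdisk_image = 0;
		bp->ext_ramdisk_size  = 0;
		bp->ext_cmd_line_ptr  = 0;
		bp->sentinel = 0;
	}
}

int main(void)
{
	struct boot_params_example bp = { .sentinel = 0xff,
					  .ext_cmd_line_ptr = 0x1234 };

	sanitize_example(&bp);
	return bp.ext_cmd_line_ptr;	/* 0: stale loader data discarded */
}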
+	 */ +	__u8  sentinel;					/* 0x1ef */ +	__u8  _pad6[1];					/* 0x1f0 */  	struct setup_header hdr;    /* setup header */	/* 0x1f1 */  	__u8  _pad7[0x290-0x1f1-sizeof(struct setup_header)];  	__u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX];	/* 0x290 */ @@ -134,6 +163,6 @@ enum {  	X86_NR_SUBARCHS,  }; - +#endif /* __ASSEMBLY__ */  #endif /* _ASM_X86_BOOTPARAM_H */ diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 58c829871c3..a0eab85ce7b 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -4,66 +4,6 @@  #include <linux/types.h>  #include <asm/ioctls.h> -/* - * Machine Check support for x86 - */ - -/* MCG_CAP register defines */ -#define MCG_BANKCNT_MASK	0xff         /* Number of Banks */ -#define MCG_CTL_P		(1ULL<<8)    /* MCG_CTL register available */ -#define MCG_EXT_P		(1ULL<<9)    /* Extended registers available */ -#define MCG_CMCI_P		(1ULL<<10)   /* CMCI supported */ -#define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */ -#define MCG_EXT_CNT_SHIFT	16 -#define MCG_EXT_CNT(c)		(((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) -#define MCG_SER_P	 	(1ULL<<24)   /* MCA recovery/new status bits */ - -/* MCG_STATUS register defines */ -#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */ -#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */ -#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */ - -/* MCi_STATUS register defines */ -#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */ -#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */ -#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */ -#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */ -#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */ -#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */ -#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */ -#define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */ -#define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */ -#define MCACOD		  0xffff     /* MCA Error Code */ - -/* Architecturally defined codes from SDM Vol. 
3B Chapter 15 */ -#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */ -#define MCACOD_SCRUBMSK	0xfff0 -#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */ -#define MCACOD_DATA	0x0134	/* Data Load */ -#define MCACOD_INSTR	0x0150	/* Instruction Fetch */ - -/* MCi_MISC register defines */ -#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f) -#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7) -#define  MCI_MISC_ADDR_SEGOFF	0	/* segment offset */ -#define  MCI_MISC_ADDR_LINEAR	1	/* linear address */ -#define  MCI_MISC_ADDR_PHYS	2	/* physical address */ -#define  MCI_MISC_ADDR_MEM	3	/* memory address */ -#define  MCI_MISC_ADDR_GENERIC	7	/* generic */ - -/* CTL2 register defines */ -#define MCI_CTL2_CMCI_EN		(1ULL << 30) -#define MCI_CTL2_CMCI_THRESHOLD_MASK	0x7fffULL - -#define MCJ_CTX_MASK		3 -#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK) -#define MCJ_CTX_RANDOM		0    /* inject context: random */ -#define MCJ_CTX_PROCESS		0x1  /* inject context: process */ -#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */ -#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */ -#define MCJ_EXCEPTION		0x8  /* raise as exception */ -#define MCJ_IRQ_BRAODCAST	0x10 /* do IRQ broadcasting */ -  /* Fields are zero when not available */  struct mce {  	__u64 status; @@ -87,35 +27,8 @@ struct mce {  	__u64 mcgcap;	/* MCGCAP MSR: machine check capabilities of CPU */  }; -/* - * This structure contains all data related to the MCE log.  Also - * carries a signature to make it easier to find from external - * debugging tools.  Each entry is only valid when its finished flag - * is set. - */ - -#define MCE_LOG_LEN 32 - -struct mce_log { -	char signature[12]; /* "MACHINECHECK" */ -	unsigned len;	    /* = MCE_LOG_LEN */ -	unsigned next; -	unsigned flags; -	unsigned recordlen;	/* length of struct mce */ -	struct mce entry[MCE_LOG_LEN]; -}; - -#define MCE_OVERFLOW 0		/* bit 0 in flags means overflow */ - -#define MCE_LOG_SIGNATURE	"MACHINECHECK" -  #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)  #define MCE_GET_LOG_LEN      _IOR('M', 2, int)  #define MCE_GETCLEAR_FLAGS   _IOR('M', 3, int) -/* Software defined banks */ -#define MCE_EXTENDED_BANK	128 -#define MCE_THERMAL_BANK	MCE_EXTENDED_BANK + 0 -#define K8_MCE_THRESHOLD_BASE      (MCE_EXTENDED_BANK + 1) -  #endif /* _UAPI_ASM_X86_MCE_H */ diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e8..562a76d433c 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)  }  early_param("x2apic_phys", set_x2apic_phys_mode); -static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static bool x2apic_fadt_phys(void)  { -	if (x2apic_phys) -		return x2apic_enabled(); -	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && -		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && -		x2apic_enabled()) { +	if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && +		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {  		printk(KERN_DEBUG "System requires x2apic physical mode\n"); -		return 1; +		return true;  	} -	else -		return 0; +	return false; +} + +static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ +	return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());  }  static void @@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)  static int x2apic_phys_probe(void)  { -	if (x2apic_mode && x2apic_phys) +	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))  		return 1;  	return apic == 
&apic_x2apic_phys; diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fe9edec6698..84c1309c4c0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -298,8 +298,7 @@ struct _cache_attr {  			 unsigned int);  }; -#ifdef CONFIG_AMD_NB - +#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)  /*   * L3 cache descriptors   */ @@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,  static struct _cache_attr subcaches =  	__ATTR(subcaches, 0644, show_subcaches, store_subcaches); -#else	/* CONFIG_AMD_NB */ +#else  #define amd_init_l3_cache(x, y) -#endif /* CONFIG_AMD_NB */ +#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */  static int  __cpuinit cpuid4_cache_lookup_regs(int index, diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4428fd178bc..6774c17a557 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event)  		/* BTS is currently only allowed for user-mode. */  		if (!attr->exclude_kernel)  			return -EOPNOTSUPP; - -		if (!attr->exclude_guest) -			return -EOPNOTSUPP;  	}  	hwc->config |= config; @@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event)  	if (event->attr.precise_ip) {  		int precise = 0; -		if (!event->attr.exclude_guest) -			return -EOPNOTSUPP; -  		/* Support for constant skid */  		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {  			precise++; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 93b9e1181f8..4914e94ad6e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)  		break;  	case 28: /* Atom */ -	case 54: /* Cedariew */ +	case 38: /* Lincroft */ +	case 39: /* Penwell */ +	case 53: /* Cloverview */ +	case 54: /* Cedarview */  		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,  		       sizeof(hw_cache_event_ids)); @@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)  		pr_cont("SandyBridge events, ");  		break;  	case 58: /* IvyBridge */ +	case 62: /* IvyBridge EP */  		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,  		       sizeof(hw_cache_event_ids));  		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index f2af39f5dc3..4820c232a0b 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =  }; -static __initconst u64 p6_hw_cache_event_ids +static u64 p6_hw_cache_event_ids  				[PERF_COUNT_HW_CACHE_MAX]  				[PERF_COUNT_HW_CACHE_OP_MAX]  				[PERF_COUNT_HW_CACHE_RESULT_MAX] = diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index ff84d5469d7..6ed91d9980e 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback)  	lea 16(%esp),%esp  	CFI_ADJUST_CFA_OFFSET -16  	jz 5f -	addl $16,%esp  	jmp iret_exc  5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */  	SAVE_ALL diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 07a7a04529b..cb3c591339a 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1781,6 +1781,7 @@ first_nmi:  	 * Leave room for the "copied" frame  	 */  	subq $(5*8), %rsp +	CFI_ADJUST_CFA_OFFSET 5*8  	/* Copy the stack frame 
to the Saved frame */  	.rept 5 @@ -1863,10 +1864,8 @@ end_repeat_nmi:  nmi_swapgs:  	SWAPGS_UNSAFE_STACK  nmi_restore: -	RESTORE_ALL 8 - -	/* Pop the extra iret frame */ -	addq $(5*8), %rsp +	/* Pop the extra iret frame at once */ +	RESTORE_ALL 6*8  	/* Clear the NMI executing stack variable */  	movq $0, 5*8(%rsp) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 8e7f6556028..c8932c79e78 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -300,6 +300,12 @@ ENTRY(startup_32_smp)  	leal -__PAGE_OFFSET(%ecx),%esp  default_entry: +#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ +			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ +			 X86_CR0_PG) +	movl $(CR0_STATE & ~X86_CR0_PG),%eax +	movl %eax,%cr0 +  /*   *	New page tables may be in 4Mbyte page mode and may   *	be using the global pages.  @@ -364,8 +370,7 @@ default_entry:   */  	movl $pa(initial_page_table), %eax  	movl %eax,%cr3		/* set the page table pointer.. */ -	movl %cr0,%eax -	orl  $X86_CR0_PG,%eax +	movl $CR0_STATE,%eax  	movl %eax,%cr0		/* ..and set paging (PG) bit */  	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */  1: diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index a7c5661f849..4929502c137 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)  	unsigned int cpu;  	struct cpuinfo_x86 *c; +	if (!capable(CAP_SYS_RAWIO)) +		return -EPERM; +  	cpu = iminor(file->f_path.dentry->d_inode);  	if (cpu >= nr_cpu_ids || !cpu_online(cpu))  		return -ENXIO;	/* No such CPU */ diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 0f5dec5c80e..872079a67e4 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = {  EXPORT_SYMBOL(x86_dma_fallback_dev);  /* Number of entries preallocated for DMA-API debugging */ -#define PREALLOC_DMA_DEBUG_ENTRIES       32768 +#define PREALLOC_DMA_DEBUG_ENTRIES       65536  int dma_set_mask(struct device *dev, u64 mask)  { diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 4e8ba39eaf0..76fa1e9a2b3 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void)  			break;  		case BOOT_EFI: -			if (efi_enabled) +			if (efi_enabled(EFI_RUNTIME_SERVICES))  				efi.reset_system(reboot_mode ?  						 
EFI_RESET_WARM :  						 EFI_RESET_COLD, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 00f6c1472b8..8b24289cc10 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -807,15 +807,15 @@ void __init setup_arch(char **cmdline_p)  #ifdef CONFIG_EFI  	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,  		     "EL32", 4)) { -		efi_enabled = 1; -		efi_64bit = false; +		set_bit(EFI_BOOT, &x86_efi_facility);  	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,  		     "EL64", 4)) { -		efi_enabled = 1; -		efi_64bit = true; +		set_bit(EFI_BOOT, &x86_efi_facility); +		set_bit(EFI_64BIT, &x86_efi_facility);  	} -	if (efi_enabled && efi_memblock_x86_reserve_range()) -		efi_enabled = 0; + +	if (efi_enabled(EFI_BOOT)) +		efi_memblock_x86_reserve_range();  #endif  	x86_init.oem.arch_setup(); @@ -888,7 +888,7 @@ void __init setup_arch(char **cmdline_p)  	finish_e820_parsing(); -	if (efi_enabled) +	if (efi_enabled(EFI_BOOT))  		efi_init();  	dmi_scan_machine(); @@ -971,7 +971,7 @@ void __init setup_arch(char **cmdline_p)  	 * The EFI specification says that boot service code won't be called  	 * after ExitBootServices(). This is, in fact, a lie.  	 */ -	if (efi_enabled) +	if (efi_enabled(EFI_MEMMAP))  		efi_reserve_boot_services();  	/* preallocate 4k for mptable mpc */ @@ -1114,7 +1114,7 @@ void __init setup_arch(char **cmdline_p)  #ifdef CONFIG_VT  #if defined(CONFIG_VGA_CONSOLE) -	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) +	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))  		conswitchp = &vga_con;  #elif defined(CONFIG_DUMMY_CONSOLE)  	conswitchp = &dummy_con; @@ -1131,14 +1131,14 @@ void __init setup_arch(char **cmdline_p)  	register_refined_jiffies(CLOCK_TICK_RATE);  #ifdef CONFIG_EFI -	/* Once setup is done above, disable efi_enabled on mismatched -	 * firmware/kernel archtectures since there is no support for -	 * runtime services. +	/* Once setup is done above, unmap the EFI memory map on +	 * mismatched firmware/kernel archtectures since there is no +	 * support for runtime services.  	 */ -	if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) { +	if (efi_enabled(EFI_BOOT) && +	    IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {  		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");  		efi_unmap_memmap(); -		efi_enabled = 0;  	}  #endif  } diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index cd3b2438a98..9b4d51d0c0d 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on)  	 * Ensure irq/preemption can't change debugctl in between.  	 * Note also that both TIF_BLOCKSTEP and debugctl should  	 * be changed atomically wrt preemption. -	 * FIXME: this means that set/clear TIF_BLOCKSTEP is simply -	 * wrong if task != current, SIGKILL can wakeup the stopped -	 * tracee and set/clear can play with the running task, this -	 * can confuse the next __switch_to_xtra(). +	 * +	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if +	 * task is current or it can't be running, otherwise we can race +	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but +	 * PTRACE_KILL is not safe.  	 
*/  	local_irq_disable();  	debugctl = get_debugctlmsr(); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 027088f2f7d..fb674fd3fc2 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,  				return;  		}  #endif +		/* Kernel addresses are always protection faults: */ +		if (address >= TASK_SIZE) +			error_code |= PF_PROT; -		if (unlikely(show_unhandled_signals)) +		if (likely(show_unhandled_signals))  			show_signal_msg(regs, error_code, address, tsk); -		/* Kernel addresses are always protection faults: */  		tsk->thread.cr2		= address; -		tsk->thread.error_code	= error_code | (address >= TASK_SIZE); +		tsk->thread.error_code	= error_code;  		tsk->thread.trap_nr	= X86_TRAP_PF;  		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c8..75c9a6a5969 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)  	if (pud_none(*pud))  		return 0; +	if (pud_large(*pud)) +		return pfn_valid(pud_pfn(*pud)); +  	pmd = pmd_offset(pud, addr);  	if (pmd_none(*pmd))  		return 0; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ad4439145f8..928bf837040 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -51,9 +51,6 @@  #define EFI_DEBUG	1 -int efi_enabled; -EXPORT_SYMBOL(efi_enabled); -  struct efi __read_mostly efi = {  	.mps        = EFI_INVALID_TABLE_ADDR,  	.acpi       = EFI_INVALID_TABLE_ADDR, @@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi);  struct efi_memory_map memmap; -bool efi_64bit; -  static struct efi efi_phys __initdata;  static efi_system_table_t efi_systab __initdata;  static inline bool efi_is_native(void)  { -	return IS_ENABLED(CONFIG_X86_64) == efi_64bit; +	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); +} + +unsigned long x86_efi_facility; + +/* + * Returns 1 if 'facility' is enabled, 0 otherwise. 
+ */ +int efi_enabled(int facility) +{ +	return test_bit(facility, &x86_efi_facility) != 0;  } +EXPORT_SYMBOL(efi_enabled);  static int __init setup_noefi(char *arg)  { -	efi_enabled = 0; +	clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);  	return 0;  }  early_param("noefi", setup_noefi); @@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void)  void __init efi_unmap_memmap(void)  { +	clear_bit(EFI_MEMMAP, &x86_efi_facility);  	if (memmap.map) {  		early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);  		memmap.map = NULL; @@ -460,7 +467,7 @@ void __init efi_free_boot_services(void)  static int __init efi_systab_init(void *phys)  { -	if (efi_64bit) { +	if (efi_enabled(EFI_64BIT)) {  		efi_system_table_64_t *systab64;  		u64 tmp = 0; @@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)  	void *config_tables, *tablep;  	int i, sz; -	if (efi_64bit) +	if (efi_enabled(EFI_64BIT))  		sz = sizeof(efi_config_table_64_t);  	else  		sz = sizeof(efi_config_table_32_t); @@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)  		efi_guid_t guid;  		unsigned long table; -		if (efi_64bit) { +		if (efi_enabled(EFI_64BIT)) {  			u64 table64;  			guid = ((efi_config_table_64_t *)tablep)->guid;  			table64 = ((efi_config_table_64_t *)tablep)->table; @@ -684,7 +691,6 @@ void __init efi_init(void)  	if (boot_params.efi_info.efi_systab_hi ||  	    boot_params.efi_info.efi_memmap_hi) {  		pr_info("Table located above 4GB, disabling EFI.\n"); -		efi_enabled = 0;  		return;  	}  	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; @@ -694,10 +700,10 @@ void __init efi_init(void)  			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));  #endif -	if (efi_systab_init(efi_phys.systab)) { -		efi_enabled = 0; +	if (efi_systab_init(efi_phys.systab))  		return; -	} + +	set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);  	/*  	 * Show what we know for posterity @@ -715,10 +721,10 @@ void __init efi_init(void)  		efi.systab->hdr.revision >> 16,  		efi.systab->hdr.revision & 0xffff, vendor); -	if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { -		efi_enabled = 0; +	if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))  		return; -	} + +	set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);  	/*  	 * Note: We currently don't support runtime services on an EFI @@ -727,15 +733,17 @@ void __init efi_init(void)  	if (!efi_is_native())  		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); -	else if (efi_runtime_init()) { -		efi_enabled = 0; -		return; +	else { +		if (efi_runtime_init()) +			return; +		set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);  	} -	if (efi_memmap_init()) { -		efi_enabled = 0; +	if (efi_memmap_init())  		return; -	} + +	set_bit(EFI_MEMMAP, &x86_efi_facility); +  #ifdef CONFIG_X86_32  	if (efi_is_native()) {  		x86_platform.get_wallclock = efi_get_time; @@ -941,7 +949,7 @@ void __init efi_enter_virtual_mode(void)  	 *  	 * Call EFI services through wrapper functions.  	 
*/ -	efi.runtime_version = efi_systab.fw_revision; +	efi.runtime_version = efi_systab.hdr.revision;  	efi.get_time = virt_efi_get_time;  	efi.set_time = virt_efi_set_time;  	efi.get_wakeup_time = virt_efi_get_wakeup_time; @@ -969,6 +977,9 @@ u32 efi_mem_type(unsigned long phys_addr)  	efi_memory_desc_t *md;  	void *p; +	if (!efi_enabled(EFI_MEMMAP)) +		return 0; +  	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {  		md = p;  		if ((md->phys_addr <= phys_addr) && diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 95fd505dfeb..2b200386061 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,7 +38,7 @@  #include <asm/cacheflush.h>  #include <asm/fixmap.h> -static pgd_t save_pgd __initdata; +static pgd_t *save_pgd __initdata;  static unsigned long efi_flags __initdata;  static void __init early_code_mapping_set_exec(int executable) @@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)  void __init efi_call_phys_prelog(void)  {  	unsigned long vaddress; +	int pgd; +	int n_pgds;  	early_code_mapping_set_exec(1);  	local_irq_save(efi_flags); -	vaddress = (unsigned long)__va(0x0UL); -	save_pgd = *pgd_offset_k(0x0UL); -	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); + +	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); +	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); + +	for (pgd = 0; pgd < n_pgds; pgd++) { +		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); +		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); +		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); +	}  	__flush_tlb_all();  } @@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)  	/*  	 * After the lock is released, the original page table is restored.  	 */ -	set_pgd(pgd_offset_k(0x0UL), save_pgd); +	int pgd; +	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); +	for (pgd = 0; pgd < n_pgds; pgd++) +		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); +	kfree(save_pgd);  	__flush_tlb_all();  	local_irq_restore(efi_flags);  	early_code_mapping_set_exec(0); diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index b8b3a37c80c..dbbdca5f508 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,   * globally purge translation cache of a virtual address or all TLB's   * @cpumask: mask of all cpu's in which the address is to be removed   * @mm: mm_struct containing virtual address range - * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) + * @start: start virtual address to be removed from TLB + * @end: end virtual address to be remove from TLB   * @cpu: the current cpu   *   * This is the entry point for initiating any UV global TLB shootdown. 
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,   */  const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,  				struct mm_struct *mm, unsigned long start, -				unsigned end, unsigned int cpu) +				unsigned long end, unsigned int cpu)  {  	int locals = 0;  	int remotes = 0; @@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,  	record_send_statistics(stat, locals, hubs, remotes, bau_desc); -	bau_desc->payload.address = start; +	if (!end || (end - start) <= PAGE_SIZE) +		bau_desc->payload.address = start; +	else +		bau_desc->payload.address = TLB_FLUSH_ALL;  	bau_desc->payload.sending_cpu = cpu;  	/*  	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged, diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index cc2f8c13128..872eb60e780 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -55,7 +55,7 @@ static FILE		*input_file;	/* Input file name */  static void usage(const char *err)  {  	if (err) -		fprintf(stderr, "Error: %s\n\n", err); +		fprintf(stderr, "%s: Error: %s\n\n", prog, err);  	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);  	fprintf(stderr, "\t-y	64bit mode\n");  	fprintf(stderr, "\t-n	32bit mode\n"); @@ -269,7 +269,13 @@ int main(int argc, char **argv)  		insns++;  	} -	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); +	fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", +		prog, +		(errors) ? "Failure" : "Success", +		insns, +		(input_file) ? "given" : "random", +		errors, +		seed);  	return errors ? 
1 : 0;  } diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 5a1847d6193..79d67bd507f 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -814,12 +814,14 @@ int main(int argc, char **argv)  	read_relocs(fp);  	if (show_absolute_syms) {  		print_absolute_symbols(); -		return 0; +		goto out;  	}  	if (show_absolute_relocs) {  		print_absolute_relocs(); -		return 0; +		goto out;  	}  	emit_relocs(as_text, use_real_mode); +out: +	fclose(fp);  	return 0;  } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 138e5667409..e0140923062 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)  #endif  } -#ifdef CONFIG_XEN_PVHVM -#define HVM_SHARED_INFO_ADDR 0xFE700000UL -static struct shared_info *xen_hvm_shared_info; -static unsigned long xen_hvm_sip_phys; -static int xen_major, xen_minor; - -static void xen_hvm_connect_shared_info(unsigned long pfn) +void __ref xen_hvm_init_shared_info(void)  { +	int cpu;  	struct xen_add_to_physmap xatp; +	static struct shared_info *shared_info_page = 0; +	if (!shared_info_page) +		shared_info_page = (struct shared_info *) +			extend_brk(PAGE_SIZE, PAGE_SIZE);  	xatp.domid = DOMID_SELF;  	xatp.idx = 0;  	xatp.space = XENMAPSPACE_shared_info; -	xatp.gpfn = pfn; +	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;  	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))  		BUG(); -} -static void __init xen_hvm_set_shared_info(struct shared_info *sip) -{ -	int cpu; - -	HYPERVISOR_shared_info = sip; +	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;  	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info  	 * page, we use it in the event channel upcall and in some pvclock  	 * related functions. We don't need the vcpu_info placement  	 * optimizations because we don't use any pv_mmu or pv_irq op on -	 * HVM. */ -	for_each_online_cpu(cpu) +	 * HVM. +	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is +	 * online but xen_hvm_init_shared_info is run at resume time too and +	 * in that case multiple vcpus might be online. */ +	for_each_online_cpu(cpu) {  		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; -} - -/* Reconnect the shared_info pfn to a (new) mfn */ -void xen_hvm_resume_shared_info(void) -{ -	xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); -} - -/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage. - * On these old tools the shared info page will be placed in E820_Ram. - * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects - * that nothing is mapped up to HVM_SHARED_INFO_ADDR. - * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used - * here for the shared info page. 
*/ -static void __init xen_hvm_init_shared_info(void) -{ -	if (xen_major < 4) { -		xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); -		xen_hvm_sip_phys = __pa(xen_hvm_shared_info); -	} else { -		xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR; -		set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys); -		xen_hvm_shared_info = -		(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);  	} -	xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); -	xen_hvm_set_shared_info(xen_hvm_shared_info);  } +#ifdef CONFIG_XEN_PVHVM  static void __init init_hvm_pv_info(void)  { -	uint32_t ecx, edx, pages, msr, base; +	int major, minor; +	uint32_t eax, ebx, ecx, edx, pages, msr, base;  	u64 pfn;  	base = xen_cpuid_base(); +	cpuid(base + 1, &eax, &ebx, &ecx, &edx); + +	major = eax >> 16; +	minor = eax & 0xffff; +	printk(KERN_INFO "Xen version %d.%d.\n", major, minor); +  	cpuid(base + 2, &pages, &msr, &ecx, &edx);  	pfn = __pa(hypercall_page); @@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)  static bool __init xen_hvm_platform(void)  { -	uint32_t eax, ebx, ecx, edx, base; -  	if (xen_pv_domain())  		return false; -	base = xen_cpuid_base(); -	if (!base) +	if (!xen_cpuid_base())  		return false; -	cpuid(base + 1, &eax, &ebx, &ecx, &edx); - -	xen_major = eax >> 16; -	xen_minor = eax & 0xffff; - -	printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor); -  	return true;  } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 4f7d2599b48..34bc4cee888 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */  	play_dead_common();  	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);  	cpu_bringup(); -	/* -	 * Balance out the preempt calls - as we are running in cpu_idle -	 * loop which has been called at bootup from cpu_bringup_and_idle. -	 * The cpucpu_bringup_and_idle called cpu_bringup which made a -	 * preempt_disable() So this preempt_enable will balance it out. -	 */ -	preempt_enable();  }  #else /* !CONFIG_HOTPLUG_CPU */ diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index ae8a00c39de..45329c8c226 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)  {  #ifdef CONFIG_XEN_PVHVM  	int cpu; -	xen_hvm_resume_shared_info(); +	xen_hvm_init_shared_info();  	xen_callback_vector();  	xen_unplug_emulated_devices();  	if (xen_feature(XENFEAT_hvm_safe_pvclock)) { diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index f9643fc50de..33ca6e42a4c 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -89,11 +89,11 @@ ENTRY(xen_iret)  	 */  #ifdef CONFIG_SMP  	GET_THREAD_INFO(%eax) -	movl TI_cpu(%eax), %eax -	movl __per_cpu_offset(,%eax,4), %eax -	mov xen_vcpu(%eax), %eax +	movl %ss:TI_cpu(%eax), %eax +	movl %ss:__per_cpu_offset(,%eax,4), %eax +	mov %ss:xen_vcpu(%eax), %eax  #else -	movl xen_vcpu, %eax +	movl %ss:xen_vcpu, %eax  #endif  	/* check IF state we're restoring */ @@ -106,11 +106,11 @@ ENTRY(xen_iret)  	 * resuming the code, so we don't have to be worried about  	 * being preempted to another CPU.  	 
*/ -	setz XEN_vcpu_info_mask(%eax) +	setz %ss:XEN_vcpu_info_mask(%eax)  xen_iret_start_crit:  	/* check for unmasked and pending */ -	cmpw $0x0001, XEN_vcpu_info_pending(%eax) +	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)  	/*  	 * If there's something pending, mask events again so we can @@ -118,7 +118,7 @@ xen_iret_start_crit:  	 * touch XEN_vcpu_info_mask.  	 */  	jne 1f -	movb $1, XEN_vcpu_info_mask(%eax) +	movb $1, %ss:XEN_vcpu_info_mask(%eax)  1:	popl %eax diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d2e73d19d36..a95b41744ad 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -40,7 +40,7 @@ void xen_enable_syscall(void);  void xen_vcpu_restore(void);  void xen_callback_vector(void); -void xen_hvm_resume_shared_info(void); +void xen_hvm_init_shared_info(void);  void xen_unplug_emulated_devices(void);  void __init xen_build_dynamic_phys_to_machine(void); diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 4acb5feba1f..172a02a6ad1 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,  	consistent_sync(vaddr, size, direction);  } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, +				    struct vm_area_struct *vma, void *cpu_addr, +				    dma_addr_t dma_addr, size_t size) +{ +	return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, +				  void *cpu_addr, dma_addr_t dma_addr, +				  size_t size) +{ +	return -EINVAL; +} +  #endif	/* _XTENSA_DMA_MAPPING_H */ diff --git a/block/genhd.c b/block/genhd.c index 9a289d7c84b..3993ebf4135 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);  static struct device_type disk_type; +static void disk_check_events(struct disk_events *ev, +			      unsigned int *clearing_ptr);  static void disk_alloc_events(struct gendisk *disk);  static void disk_add_events(struct gendisk *disk);  static void disk_del_events(struct gendisk *disk); @@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)  	const struct block_device_operations *bdops = disk->fops;  	struct disk_events *ev = disk->ev;  	unsigned int pending; +	unsigned int clearing = mask;  	if (!ev) {  		/* for drivers still using the old ->media_changed method */ @@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)  		return 0;  	} -	/* tell the workfn about the events being cleared */ +	disk_block_events(disk); + +	/* +	 * store the union of mask and ev->clearing on the stack so that the +	 * race with disk_flush_events does not cause ambiguity (ev->clearing +	 * can still be modified even if events are blocked). +	 */  	spin_lock_irq(&ev->lock); -	ev->clearing |= mask; +	clearing |= ev->clearing; +	ev->clearing = 0;  	spin_unlock_irq(&ev->lock); -	/* uncondtionally schedule event check and wait for it to finish */ -	disk_block_events(disk); -	queue_delayed_work(system_freezable_wq, &ev->dwork, 0); -	flush_delayed_work(&ev->dwork); -	__disk_unblock_events(disk, false); +	disk_check_events(ev, &clearing); +	/* +	 * if ev->clearing is not 0, the disk_flush_events got called in the +	 * middle of this function, so we want to run the workfn without delay. +	 */ +	__disk_unblock_events(disk, ev->clearing ? 
true : false);  	/* then, fetch and clear pending events */  	spin_lock_irq(&ev->lock); -	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */  	pending = ev->pending & mask;  	ev->pending &= ~mask;  	spin_unlock_irq(&ev->lock); +	WARN_ON_ONCE(clearing & mask);  	return pending;  } +/* + * Separate this part out so that a different pointer for clearing_ptr can be + * passed in for disk_clear_events. + */  static void disk_events_workfn(struct work_struct *work)  {  	struct delayed_work *dwork = to_delayed_work(work);  	struct disk_events *ev = container_of(dwork, struct disk_events, dwork); + +	disk_check_events(ev, &ev->clearing); +} + +static void disk_check_events(struct disk_events *ev, +			      unsigned int *clearing_ptr) +{  	struct gendisk *disk = ev->disk;  	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; -	unsigned int clearing = ev->clearing; +	unsigned int clearing = *clearing_ptr;  	unsigned int events;  	unsigned long intv;  	int nr_events = 0, i; @@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)  	events &= ~ev->pending;  	ev->pending |= events; -	ev->clearing &= ~clearing; +	*clearing_ptr &= ~clearing;  	intv = disk_events_poll_jiffies(disk);  	if (!ev->block && intv) diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 00a783661d0..46f80e2c92f 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,  	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&  	    *access_bit_width < 32)  		*access_bit_width = 32; +	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 && +	    *access_bit_width < 64) +		*access_bit_width = 64;  	if ((bit_width + bit_offset) > *access_bit_width) {  		pr_warning(FW_BUG APEI_PFX diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 3ff26786154..bd22f8667ee 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)  		return acpi_rsdp;  #endif -	if (efi_enabled) { +	if (efi_enabled(EFI_CONFIG_TABLES)) {  		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)  			return efi.acpi20;  		else if (efi.acpi != EFI_INVALID_TABLE_ADDR) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f1a5da44591..ed9a1cc690b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -958,6 +958,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)  		return -EINVAL;  	} +	if (!dev) +		return -EINVAL; +  	dev->cpu = pr->id;  	if (max_cstate == 0) @@ -1149,6 +1152,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)  		}  		/* Populate Updated C-state information */ +		acpi_processor_get_power_info(pr);  		acpi_processor_setup_cpuidle_states(pr);  		/* Enable all cpuidle devices */ diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 836bfe06904..53e7ac9403a 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)  	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)  	    || boot_cpu_data.x86 == 0x11) {  		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); +		/* +		 * MSR C001_0064+: +		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid. 
+		 */ +		if (!(hi & BIT(31))) +			return; +  		fid = lo & 0x3f;  		did = (lo >> 6) & 7;  		if (boot_cpu_data.x86 == 0x10) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7862d17976b..49791273256 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -53,6 +53,7 @@  enum {  	AHCI_PCI_BAR_STA2X11	= 0, +	AHCI_PCI_BAR_ENMOTUS	= 2,  	AHCI_PCI_BAR_STANDARD	= 5,  }; @@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {  	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },	/* ASM1061 */  	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */ +	/* Enmotus */ +	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci }, +  	/* Generic, PCI class code for AHCI */  	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,  	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, @@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  		dev_info(&pdev->dev,  			 "PDC42819 can only drive SATA devices with this driver\n"); -	/* The Connext uses non-standard BAR */ +	/* Both Connext and Enmotus devices use non-standard BARs */  	if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)  		ahci_pci_bar = AHCI_PCI_BAR_STA2X11; +	else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) +		ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;  	/* acquire resources */  	rc = pcim_enable_device(pdev); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 320712a7b9e..6cd7805e47c 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)  	/* Use the nominal value 10 ms if the read MDAT is zero,  	 * the nominal value of DETO is 20 ms.  	 */ -	if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & +	if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &  	    ATA_LOG_DEVSLP_VALID_MASK) { -		mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & +		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &  		       ATA_LOG_DEVSLP_MDAT_MASK;  		if (!mdat)  			mdat = 10; -		deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; +		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];  		if (!deto)  			deto = 20;  	} else { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9e8b99af400..46cd3f4c6aa 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)  			}  		} -		/* check and mark DevSlp capability */ -		if (ata_id_has_devslp(dev->id)) -			dev->flags |= ATA_DFLAG_DEVSLP; - -		/* Obtain SATA Settings page from Identify Device Data Log, -		 * which contains DevSlp timing variables etc. -		 * Exclude old devices with ata_id_has_ncq() +		/* Check and mark DevSlp capability. Get DevSlp timing variables +		 * from SATA Settings page of Identify Device Data Log.  		 
*/ -		if (ata_id_has_ncq(dev->id)) { +		if (ata_id_has_devslp(dev->id)) { +			u8 sata_setting[ATA_SECT_SIZE]; +			int i, j; + +			dev->flags |= ATA_DFLAG_DEVSLP;  			err_mask = ata_read_log_page(dev,  						     ATA_LOG_SATA_ID_DEV_DATA,  						     ATA_LOG_SATA_SETTINGS, -						     dev->sata_settings, +						     sata_setting,  						     1);  			if (err_mask)  				ata_dev_dbg(dev,  					    "failed to get Identify Device Data, Emask 0x%x\n",  					    err_mask); +			else +				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { +					j = ATA_LOG_DEVSLP_OFFSET + i; +					dev->devslp_timing[i] = sata_setting[j]; +				}  		}  		dev->cdb_len = 16; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index bf039b0e97b..bcf4437214f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,   */  static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)  { -	if (qc->flags & AC_ERR_MEDIA) +	if (qc->err_mask & AC_ERR_MEDIA)  		return 0;	/* don't retry media errors */  	if (qc->flags & ATA_QCFLAG_IO)  		return 1;	/* otherwise retry anything from fs stack */ diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 6a0955e6d4f..53ecac5a216 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -636,82 +636,82 @@ struct rx_buf_desc {  #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE    #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE   -typedef volatile u_int  freg_t; +typedef volatile u_int	ffreg_t;  typedef u_int   rreg_t;  typedef struct _ffredn_t { -        freg_t  idlehead_high;  /* Idle cell header (high)              */ -        freg_t  idlehead_low;   /* Idle cell header (low)               */ -        freg_t  maxrate;        /* Maximum rate                         */ -        freg_t  stparms;        /* Traffic Management Parameters        */ -        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */ -        freg_t  rm_type;        /*                                      */ -        u_int   filler5[0x17 - 0x06]; -        freg_t  cmd_reg;        /* Command register                     */ -        u_int   filler18[0x20 - 0x18]; -        freg_t  cbr_base;       /* CBR Pointer Base                     */ -        freg_t  vbr_base;       /* VBR Pointer Base                     */ -        freg_t  abr_base;       /* ABR Pointer Base                     */ -        freg_t  ubr_base;       /* UBR Pointer Base                     */ -        u_int   filler24; -        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */ -        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */ -        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */ -        freg_t  vct_base;       /* Main VC Table Base                   */ -        freg_t  vcte_base;      /* Extended Main VC Table Base          */ -        u_int   filler2a[0x2C - 0x2A]; -        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */ -        freg_t  cbr_tab_end;    /* CBR Table End                        */ -        freg_t  cbr_pointer;    /* CBR Pointer                          */ -        u_int   filler2f[0x30 - 0x2F]; -        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */ -        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */ -        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */ -        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */ -        freg_t  tcq_st_adr;     
/* Transmit Complete Queue Start Address*/ -        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */ -        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */ -        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/ -        u_int   filler38[0x40 - 0x38]; -        freg_t  queue_base;     /* Base address for PRQ and TCQ         */ -        freg_t  desc_base;      /* Base address of descriptor table     */ -        u_int   filler42[0x45 - 0x42]; -        freg_t  mode_reg_0;     /* Mode register 0                      */ -        freg_t  mode_reg_1;     /* Mode register 1                      */ -        freg_t  intr_status_reg;/* Interrupt Status register            */ -        freg_t  mask_reg;       /* Mask Register                        */ -        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */ -        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */ -        freg_t  state_reg;      /* Status register                      */ -        u_int   filler4c[0x58 - 0x4c]; -        freg_t  curr_desc_num;  /* Contains the current descriptor num  */ -        freg_t  next_desc;      /* Next descriptor                      */ -        freg_t  next_vc;        /* Next VC                              */ -        u_int   filler5b[0x5d - 0x5b]; -        freg_t  present_slot_cnt;/* Present slot count                  */ -        u_int   filler5e[0x6a - 0x5e]; -        freg_t  new_desc_num;   /* New descriptor number                */ -        freg_t  new_vc;         /* New VC                               */ -        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */ -        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */ -        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */ -        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */ -        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */ -        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */ -        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */ -        freg_t  cbr_vc;         /* CBR VC                               */ -        freg_t  vbr_sb_vc;      /* VBR SB VC                            */ -        freg_t  abr_sb_vc;      /* ABR SB VC                            */ -        freg_t  ubr_sb_vc;      /* UBR SB VC                            */ -        freg_t  vbr_next_link;  /* VBR next link                        */ -        freg_t  abr_next_link;  /* ABR next link                        */ -        freg_t  ubr_next_link;  /* UBR next link                        */ -        u_int   filler7a[0x7c-0x7a]; -        freg_t  out_rate_head;  /* Out of rate head                     */ -        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */ -        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */ -        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */ -        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */ +	ffreg_t	idlehead_high;	/* Idle cell header (high)		*/ +	ffreg_t	idlehead_low;	/* Idle cell header (low)		*/ +	ffreg_t	maxrate;	/* Maximum rate				*/ +	ffreg_t	stparms;	/* Traffic Management Parameters	*/ +	ffreg_t	abrubr_abr;	/* ABRUBR Priority Byte 1, TCR Byte 0	*/ +	ffreg_t	rm_type;	/*					*/ +	u_int	filler5[0x17 - 0x06]; +	ffreg_t	cmd_reg;	/* Command register			*/ +	u_int	filler18[0x20 - 0x18]; +	ffreg_t	cbr_base;	/* CBR Pointer Base			*/ +	ffreg_t	
vbr_base;	/* VBR Pointer Base			*/ +	ffreg_t	abr_base;	/* ABR Pointer Base			*/ +	ffreg_t	ubr_base;	/* UBR Pointer Base			*/ +	u_int	filler24; +	ffreg_t	vbrwq_base;	/* VBR Wait Queue Base			*/ +	ffreg_t	abrwq_base;	/* ABR Wait Queue Base			*/ +	ffreg_t	ubrwq_base;	/* UBR Wait Queue Base			*/ +	ffreg_t	vct_base;	/* Main VC Table Base			*/ +	ffreg_t	vcte_base;	/* Extended Main VC Table Base		*/ +	u_int	filler2a[0x2C - 0x2A]; +	ffreg_t	cbr_tab_beg;	/* CBR Table Begin			*/ +	ffreg_t	cbr_tab_end;	/* CBR Table End			*/ +	ffreg_t	cbr_pointer;	/* CBR Pointer				*/ +	u_int	filler2f[0x30 - 0x2F]; +	ffreg_t	prq_st_adr;	/* Packet Ready Queue Start Address	*/ +	ffreg_t	prq_ed_adr;	/* Packet Ready Queue End Address	*/ +	ffreg_t	prq_rd_ptr;	/* Packet Ready Queue read pointer	*/ +	ffreg_t	prq_wr_ptr;	/* Packet Ready Queue write pointer	*/ +	ffreg_t	tcq_st_adr;	/* Transmit Complete Queue Start Address*/ +	ffreg_t	tcq_ed_adr;	/* Transmit Complete Queue End Address	*/ +	ffreg_t	tcq_rd_ptr;	/* Transmit Complete Queue read pointer */ +	ffreg_t	tcq_wr_ptr;	/* Transmit Complete Queue write pointer*/ +	u_int	filler38[0x40 - 0x38]; +	ffreg_t	queue_base;	/* Base address for PRQ and TCQ		*/ +	ffreg_t	desc_base;	/* Base address of descriptor table	*/ +	u_int	filler42[0x45 - 0x42]; +	ffreg_t	mode_reg_0;	/* Mode register 0			*/ +	ffreg_t	mode_reg_1;	/* Mode register 1			*/ +	ffreg_t	intr_status_reg;/* Interrupt Status register		*/ +	ffreg_t	mask_reg;	/* Mask Register			*/ +	ffreg_t	cell_ctr_high1; /* Total cell transfer count (high)	*/ +	ffreg_t	cell_ctr_lo1;	/* Total cell transfer count (low)	*/ +	ffreg_t	state_reg;	/* Status register			*/ +	u_int	filler4c[0x58 - 0x4c]; +	ffreg_t	curr_desc_num;	/* Contains the current descriptor num	*/ +	ffreg_t	next_desc;	/* Next descriptor			*/ +	ffreg_t	next_vc;	/* Next VC				*/ +	u_int	filler5b[0x5d - 0x5b]; +	ffreg_t	present_slot_cnt;/* Present slot count			*/ +	u_int	filler5e[0x6a - 0x5e]; +	ffreg_t	new_desc_num;	/* New descriptor number		*/ +	ffreg_t	new_vc;		/* New VC				*/ +	ffreg_t	sched_tbl_ptr;	/* Schedule table pointer		*/ +	ffreg_t	vbrwq_wptr;	/* VBR wait queue write pointer		*/ +	ffreg_t	vbrwq_rptr;	/* VBR wait queue read pointer		*/ +	ffreg_t	abrwq_wptr;	/* ABR wait queue write pointer		*/ +	ffreg_t	abrwq_rptr;	/* ABR wait queue read pointer		*/ +	ffreg_t	ubrwq_wptr;	/* UBR wait queue write pointer		*/ +	ffreg_t	ubrwq_rptr;	/* UBR wait queue read pointer		*/ +	ffreg_t	cbr_vc;		/* CBR VC				*/ +	ffreg_t	vbr_sb_vc;	/* VBR SB VC				*/ +	ffreg_t	abr_sb_vc;	/* ABR SB VC				*/ +	ffreg_t	ubr_sb_vc;	/* UBR SB VC				*/ +	ffreg_t	vbr_next_link;	/* VBR next link			*/ +	ffreg_t	abr_next_link;	/* ABR next link			*/ +	ffreg_t	ubr_next_link;	/* UBR next link			*/ +	u_int	filler7a[0x7c-0x7a]; +	ffreg_t	out_rate_head;	/* Out of rate head			*/ +	u_int	filler7d[0xca-0x7d]; /* pad out to full address space	*/ +	ffreg_t	cell_ctr_high1_nc;/* Total cell transfer count (high)	*/ +	ffreg_t	cell_ctr_lo1_nc;/* Total cell transfer count (low)	*/ +	u_int	fillercc[0x100-0xcc]; /* pad out to full address space	 */  } ffredn_t;  typedef struct _rfredn_t { diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 46a213a596e..d9a6c94ce42 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -121,8 +121,6 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,  		c->max = p - 1;  		list_add_tail(&c->list,  			      &map->debugfs_off_cache); -	} else { -		return base;  	}  	/* diff --git 
a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 42d5cb0f503..f00b059c057 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1106,7 +1106,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);   * @val_count: Number of registers to write   *   * This function is intended to be used for writing a large block of - * data to be device either in single transfer or multiple transfer. + * data to the device either in single transfer or multiple transfer.   *   * A value of zero will be returned on success, a negative errno will   * be returned in error cases. diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 19e3fbfd575..cb0c4548857 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);  #ifdef CONFIG_BCMA_DRIVER_GPIO  /* driver_gpio.c */  int bcma_gpio_init(struct bcma_drv_cc *cc); +int bcma_gpio_unregister(struct bcma_drv_cc *cc);  #else  static inline int bcma_gpio_init(struct bcma_drv_cc *cc)  {  	return -ENOTSUPP;  } +static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ +	return 0; +}  #endif /* CONFIG_BCMA_DRIVER_GPIO */  #endif diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c index dbda91e4dff..1f0b83e18f6 100644 --- a/drivers/bcma/driver_chipcommon_nflash.c +++ b/drivers/bcma/driver_chipcommon_nflash.c @@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)  	struct bcma_bus *bus = cc->core->bus;  	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && -	    cc->core->id.rev != 0x38) { +	    cc->core->id.rev != 38) {  		bcma_err(bus, "NAND flash on unsupported board!\n");  		return -ENOTSUPP;  	} diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 9a6f585da2d..71f755c06fc 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)  	return gpiochip_add(chip);  } + +int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ +	return gpiochip_remove(&cc->gpio); +} diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 4a92f647b58..324f9debda8 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)  void bcma_bus_unregister(struct bcma_bus *bus)  {  	struct bcma_device *cores[3]; +	int err; + +	err = bcma_gpio_unregister(&bus->drv_cc); +	if (err == -EBUSY) +		bcma_err(bus, "Some GPIOs are still in use.\n"); +	else if (err) +		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);  	cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);  	cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index f58a4a4b4df..2b8303ad63c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {  }  /* must hold resource->req_lock */ -static void start_new_tl_epoch(struct drbd_tconn *tconn) +void start_new_tl_epoch(struct drbd_tconn *tconn)  {  	/* no point closing an epoch, if it is empty, anyways. 
*/  	if (tconn->current_tle_writes == 0) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 016de6b8bb5..c08d22964d0 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -267,6 +267,7 @@ struct bio_and_error {  	int error;  }; +extern void start_new_tl_epoch(struct drbd_tconn *tconn);  extern void drbd_req_destroy(struct kref *kref);  extern void _req_may_be_done(struct drbd_request *req,  		struct bio_and_error *m); diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 53bf6182bac..0fe220cfb9e 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,  	enum drbd_state_rv rv = SS_SUCCESS;  	enum sanitize_state_warnings ssw;  	struct after_state_chg_work *ascw; +	bool did_remote, should_do_remote;  	os = drbd_read_state(mdev); @@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,  	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))  		atomic_inc(&mdev->local_cnt); +	did_remote = drbd_should_do_remote(mdev->state);  	mdev->state.i = ns.i; +	should_do_remote = drbd_should_do_remote(mdev->state);  	mdev->tconn->susp = ns.susp;  	mdev->tconn->susp_nod = ns.susp_nod;  	mdev->tconn->susp_fen = ns.susp_fen; +	/* put replicated vs not-replicated requests in seperate epochs */ +	if (did_remote != should_do_remote) +		start_new_tl_epoch(mdev->tconn); +  	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)  		drbd_print_uuids(mdev, "attached to UUIDs"); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 9694dd99bbb..3fd10099045 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)  		}  	} -	if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { +	if (cmdto_cnt) {  		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); - -		mtip_restart_port(port); +		if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { +			mtip_restart_port(port); +			wake_up_interruptible(&port->svc_wait); +		}  		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); -		wake_up_interruptible(&port->svc_wait);  	}  	if (port->ic_pause_timer) { @@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)  	 * Delete our gendisk structure. This also removes the device  	 * from /dev  	 */ -	del_gendisk(dd->disk); +	if (dd->disk) { +		if (dd->disk->queue) +			del_gendisk(dd->disk); +		else +			put_disk(dd->disk); +	}  	spin_lock(&rssd_index_lock);  	ida_remove(&rssd_index_ida, dd->index); @@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)  		"Shutting down %s ...\n", dd->disk->disk_name);  	/* Delete our gendisk structure, and cleanup the blk queue. 
*/ -	del_gendisk(dd->disk); +	if (dd->disk) { +		if (dd->disk->queue) +			del_gendisk(dd->disk); +		else +			put_disk(dd->disk); +	} +  	spin_lock(&rssd_index_lock);  	ida_remove(&rssd_index_ida, dd->index); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 564156a8e57..5814deb6963 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)  	int op_len, err;  	void *req_buf; -	if (!(((u64)1 << ((u64)op - 1)) & port->operations)) +	if (!(((u64)1 << (u64)op) & port->operations))  		return -EOPNOTSUPP;  	switch (op) { diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d8409c0208..8ad21a25bc0 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev)  {  	struct virtio_blk *vblk = vdev->priv;  	int index = vblk->index; +	int refc;  	/* Prevent config work handler from accessing the device. */  	mutex_lock(&vblk->config_lock); @@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev)  	flush_work(&vblk->config_work); +	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);  	put_disk(vblk->disk);  	mempool_destroy(vblk->pool);  	vdev->config->del_vqs(vdev);  	kfree(vblk); -	ida_simple_remove(&vd_index_ida, index); + +	/* Only free device id if we don't have any users */ +	if (refc == 1) +		ida_simple_remove(&vd_index_ida, index);  }  #ifdef CONFIG_PM diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 74374fb762a..5ac841ff6cc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,  static void make_response(struct xen_blkif *blkif, u64 id,  			  unsigned short op, int st); -#define foreach_grant(pos, rbtree, node) \ -	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ +#define foreach_grant_safe(pos, n, rbtree, node) \ +	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ +	     (n) = rb_next(&(pos)->node); \  	     &(pos)->node != NULL; \ -	     (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) +	     (pos) = container_of(n, typeof(*(pos)), node), \ +	     (n) = (&(pos)->node != NULL) ? 
rb_next(&(pos)->node) : NULL)  static void add_persistent_gnt(struct rb_root *root, @@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];  	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  	struct persistent_gnt *persistent_gnt; +	struct rb_node *n;  	int ret = 0;  	int segs_to_unmap = 0; -	foreach_grant(persistent_gnt, root, node) { +	foreach_grant_safe(persistent_gnt, n, root, node) {  		BUG_ON(persistent_gnt->handle ==  			BLKBACK_INVALID_HANDLE);  		gnttab_set_unmap_op(&unmap[segs_to_unmap], @@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  			persistent_gnt->handle);  		pages[segs_to_unmap] = persistent_gnt->page; -		rb_erase(&persistent_gnt->node, root); -		kfree(persistent_gnt); -		num--;  		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||  			!rb_next(&persistent_gnt->node)) { @@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)  			BUG_ON(ret);  			segs_to_unmap = 0;  		} + +		rb_erase(&persistent_gnt->node, root); +		kfree(persistent_gnt); +		num--;  	}  	BUG_ON(num != 0);  } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 96e9b00db08..11043c18ac5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  {  	struct llist_node *all_gnts;  	struct grant *persistent_gnt; +	struct llist_node *n;  	/* Prevent new requests being issued until we fix things up. */  	spin_lock_irq(&info->io_lock); @@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  	/* Remove all persistent grants */  	if (info->persistent_gnts_c) {  		all_gnts = llist_del_all(&info->persistent_gnts); -		llist_for_each_entry(persistent_gnt, all_gnts, node) { +		llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {  			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);  			__free_page(pfn_to_page(persistent_gnt->pfn));  			kfree(persistent_gnt); @@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)  static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  			     struct blkif_response *bret)  { -	int i; +	int i = 0;  	struct bio_vec *bvec;  	struct req_iterator iter;  	unsigned long flags; @@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  		 */  		rq_for_each_segment(bvec, s->request, iter) {  			BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); -			i = offset >> PAGE_SHIFT; +			if (bvec->bv_offset < offset) +				i++;  			BUG_ON(i >= s->req.u.rw.nr_segments);  			shared_data = kmap_atomic(  				pfn_to_page(s->grants_used[i]->pfn)); @@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,  				bvec->bv_len);  			bvec_kunmap_irq(bvec_data, &flags);  			kunmap_atomic(shared_data); -			offset += bvec->bv_len; +			offset = bvec->bv_offset + bvec->bv_len;  		}  	}  	/* Add the persistent grant into the list of free grants */ diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index b00000e8aef..33c9a44a967 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {  	{ USB_DEVICE(0x0CF3, 0x311D) },  	{ USB_DEVICE(0x13d3, 0x3375) },  	{ USB_DEVICE(0x04CA, 0x3005) }, +	{ USB_DEVICE(0x04CA, 0x3006) }, +	{ 
USB_DEVICE(0x04CA, 0x3008) },  	{ USB_DEVICE(0x13d3, 0x3362) },  	{ USB_DEVICE(0x0CF3, 0xE004) },  	{ USB_DEVICE(0x0930, 0x0219) },  	{ USB_DEVICE(0x0489, 0xe057) }, +	{ USB_DEVICE(0x13d3, 0x3393) }, +	{ USB_DEVICE(0x0489, 0xe04e) }, +	{ USB_DEVICE(0x0489, 0xe056) },  	/* Atheros AR5BBU12 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xE02C) }, @@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {  	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },  	/* Atheros AR5BBU22 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a1d4ede5b89..7e351e34547 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -135,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },  	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, +	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },  	/* Atheros AR5BBU12 with sflash firmware */  	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 684b0d53764..ee4dbeafb37 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)  	/* Disable interrupts for vqs */  	vdev->config->reset(vdev);  	/* Finish up work that's lined up */ -	cancel_work_sync(&portdev->control_work); +	if (use_multiport(portdev)) +		cancel_work_sync(&portdev->control_work);  	list_for_each_entry_safe(port, port2, &portdev->ports, list)  		unplug_port(port); diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index ff004578a11..9dd2551a0a4 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -124,7 +124,7 @@ void __init of_cpu_clk_setup(struct device_node *node)  	clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);  	if (WARN_ON(!clks)) -		return; +		goto clks_out;  	for_each_node_by_type(dn, "cpu") {  		struct clk_init_data init; @@ -134,11 +134,11 @@ void __init of_cpu_clk_setup(struct device_node *node)  		int cpu, err;  		if (WARN_ON(!clk_name)) -			return; +			goto 
bail_out;  		err = of_property_read_u32(dn, "reg", &cpu);  		if (WARN_ON(err)) -			return; +			goto bail_out;  		sprintf(clk_name, "cpu%d", cpu);  		parent_clk = of_clk_get(node, 0); @@ -167,6 +167,9 @@ void __init of_cpu_clk_setup(struct device_node *node)  	return;  bail_out:  	kfree(clks); +	while(ncpus--) +		kfree(cpuclk[ncpus].clk_name); +clks_out:  	kfree(cpuclk);  } diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 934854ae5eb..7227cd73404 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -106,7 +106,7 @@ config X86_POWERNOW_K7_ACPI  config X86_POWERNOW_K8  	tristate "AMD Opteron/Athlon64 PowerNow!"  	select CPU_FREQ_TABLE -	depends on ACPI && ACPI_PROCESSOR +	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ  	help  	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.  	  Support for K10 and newer processors is now in acpi-cpufreq. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 0d048f6a2b2..7b0d49d78c6 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -1030,4 +1030,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,  late_initcall(acpi_cpufreq_init);  module_exit(acpi_cpufreq_exit); +static const struct x86_cpu_id acpi_cpufreq_ids[] = { +	X86_FEATURE_MATCH(X86_FEATURE_ACPI), +	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), +	{} +}; +MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); +  MODULE_ALIAS("acpi"); diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 52bf36d599f..debc5a7c8db 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,  	}  	if (cpu_reg) { +		rcu_read_lock();  		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);  		if (IS_ERR(opp)) { +			rcu_read_unlock();  			pr_err("failed to find OPP for %ld\n", freq_Hz);  			return PTR_ERR(opp);  		}  		volt = opp_get_voltage(opp); +		rcu_read_unlock();  		tol = volt * voltage_tolerance / 100;  		volt_old = regulator_get_voltage(cpu_reg);  	} @@ -236,12 +239,14 @@ static int cpu0_cpufreq_driver_init(void)  		 */  		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)  			; +		rcu_read_lock();  		opp = opp_find_freq_exact(cpu_dev,  				freq_table[0].frequency * 1000, true);  		min_uV = opp_get_voltage(opp);  		opp = opp_find_freq_exact(cpu_dev,  				freq_table[i-1].frequency * 1000, true);  		max_uV = opp_get_voltage(opp); +		rcu_read_unlock();  		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);  		if (ret > 0)  			transition_latency += ret * 1000; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 1f3417a8322..97102b05843 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,  	freq = ret;  	if (mpu_reg) { +		rcu_read_lock();  		opp = opp_find_freq_ceil(mpu_dev, &freq);  		if (IS_ERR(opp)) { +			rcu_read_unlock();  			dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",  				__func__, freqs.new);  			return -EINVAL;  		}  		volt = opp_get_voltage(opp); +		rcu_read_unlock();  		tol = volt * OPP_TOLERANCE / 100;  		volt_old = regulator_get_voltage(mpu_reg);  	} diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 53766f39aad..3b367973a80 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -994,6 +994,11 @@ module_exit(devfreq_exit);   * @freq:	The frequency given to target 
function   * @flags:	Flags handed from devfreq framework.   * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer.   */  struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,  				    u32 flags) diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c index 80c745e8308..46d94e9e95b 100644 --- a/drivers/devfreq/exynos4_bus.c +++ b/drivers/devfreq/exynos4_bus.c @@ -73,6 +73,16 @@ enum busclk_level_idx {  #define EX4210_LV_NUM	(LV_2 + 1)  #define EX4x12_LV_NUM	(LV_4 + 1) +/** + * struct busfreq_opp_info - opp information for bus + * @rate:	Frequency in hertz + * @volt:	Voltage in microvolts corresponding to this OPP + */ +struct busfreq_opp_info { +	unsigned long rate; +	unsigned long volt; +}; +  struct busfreq_data {  	enum exynos4_busf_type type;  	struct device *dev; @@ -80,7 +90,7 @@ struct busfreq_data {  	bool disabled;  	struct regulator *vdd_int;  	struct regulator *vdd_mif; /* Exynos4412/4212 only */ -	struct opp *curr_opp; +	struct busfreq_opp_info curr_oppinfo;  	struct exynos4_ppmu dmc[2];  	struct notifier_block pm_notifier; @@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {  }; -static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp) +static int exynos4210_set_busclk(struct busfreq_data *data, +				 struct busfreq_opp_info *oppi)  {  	unsigned int index;  	unsigned int tmp;  	for (index = LV_0; index < EX4210_LV_NUM; index++) -		if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk) +		if (oppi->rate == exynos4210_busclk_table[index].clk)  			break;  	if (index == EX4210_LV_NUM) @@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)  	return 0;  } -static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp) +static int exynos4x12_set_busclk(struct busfreq_data *data, +				 struct busfreq_opp_info *oppi)  {  	unsigned int index;  	unsigned int tmp;  	for (index = LV_0; index < EX4x12_LV_NUM; index++) -		if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk) +		if (oppi->rate == exynos4x12_mifclk_table[index].clk)  			break;  	if (index == EX4x12_LV_NUM) @@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)  	return -EINVAL;  } -static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp, -			       struct opp *oldopp) +static int exynos4_bus_setvolt(struct busfreq_data *data, +			       struct busfreq_opp_info *oppi, +			       struct busfreq_opp_info *oldoppi)  {  	int err = 0, tmp; -	unsigned long volt = opp_get_voltage(opp); +	unsigned long volt = oppi->volt;  	switch (data->type) {  	case TYPE_BUSF_EXYNOS4210: @@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,  		if (err)  			break; -		tmp = exynos4x12_get_intspec(opp_get_freq(opp)); +		tmp = exynos4x12_get_intspec(oppi->rate);  		if (tmp < 0) {  			err = tmp;  			regulator_set_voltage(data->vdd_mif, -					      opp_get_voltage(oldopp), +					      oldoppi->volt,  					      MAX_SAFEVOLT);  			break;  		} @@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,  		/*  Try to recover */  		if (err)  			
regulator_set_voltage(data->vdd_mif, -					      opp_get_voltage(oldopp), +					      oldoppi->volt,  					      MAX_SAFEVOLT);  		break;  	default: @@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,  	struct platform_device *pdev = container_of(dev, struct platform_device,  						    dev);  	struct busfreq_data *data = platform_get_drvdata(pdev); -	struct opp *opp = devfreq_recommended_opp(dev, _freq, flags); -	unsigned long freq = opp_get_freq(opp); -	unsigned long old_freq = opp_get_freq(data->curr_opp); +	struct opp *opp; +	unsigned long freq; +	unsigned long old_freq = data->curr_oppinfo.rate; +	struct busfreq_opp_info	new_oppinfo; -	if (IS_ERR(opp)) +	rcu_read_lock(); +	opp = devfreq_recommended_opp(dev, _freq, flags); +	if (IS_ERR(opp)) { +		rcu_read_unlock();  		return PTR_ERR(opp); +	} +	new_oppinfo.rate = opp_get_freq(opp); +	new_oppinfo.volt = opp_get_voltage(opp); +	rcu_read_unlock(); +	freq = new_oppinfo.rate;  	if (old_freq == freq)  		return 0; -	dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp)); +	dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);  	mutex_lock(&data->lock); @@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,  		goto out;  	if (old_freq < freq) -		err = exynos4_bus_setvolt(data, opp, data->curr_opp); +		err = exynos4_bus_setvolt(data, &new_oppinfo, +					  &data->curr_oppinfo);  	if (err)  		goto out;  	if (old_freq != freq) {  		switch (data->type) {  		case TYPE_BUSF_EXYNOS4210: -			err = exynos4210_set_busclk(data, opp); +			err = exynos4210_set_busclk(data, &new_oppinfo);  			break;  		case TYPE_BUSF_EXYNOS4x12: -			err = exynos4x12_set_busclk(data, opp); +			err = exynos4x12_set_busclk(data, &new_oppinfo);  			break;  		default:  			err = -EINVAL; @@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,  		goto out;  	if (old_freq > freq) -		err = exynos4_bus_setvolt(data, opp, data->curr_opp); +		err = exynos4_bus_setvolt(data, &new_oppinfo, +					  &data->curr_oppinfo);  	if (err)  		goto out; -	data->curr_opp = opp; +	data->curr_oppinfo = new_oppinfo;  out:  	mutex_unlock(&data->lock);  	return err; @@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,  	exynos4_read_ppmu(data);  	busier_dmc = exynos4_get_busier_dmc(data); -	stat->current_frequency = opp_get_freq(data->curr_opp); +	stat->current_frequency = data->curr_oppinfo.rate;  	if (busier_dmc)  		addr = S5P_VA_DMC1; @@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,  	struct busfreq_data *data = container_of(this, struct busfreq_data,  						 pm_notifier);  	struct opp *opp; +	struct busfreq_opp_info	new_oppinfo;  	unsigned long maxfreq = ULONG_MAX;  	int err = 0; @@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,  		data->disabled = true; +		rcu_read_lock();  		opp = opp_find_freq_floor(data->dev, &maxfreq); +		if (IS_ERR(opp)) { +			rcu_read_unlock(); +			dev_err(data->dev, "%s: unable to find a min freq\n", +				__func__); +			return PTR_ERR(opp); +		} +		new_oppinfo.rate = opp_get_freq(opp); +		new_oppinfo.volt = opp_get_voltage(opp); +		rcu_read_unlock(); -		err = exynos4_bus_setvolt(data, opp, data->curr_opp); +		err = exynos4_bus_setvolt(data, &new_oppinfo, +					  &data->curr_oppinfo);  		if (err)  			goto unlock;  		switch (data->type) {  		case TYPE_BUSF_EXYNOS4210: -			err = exynos4210_set_busclk(data, opp); +			err = 
exynos4210_set_busclk(data, &new_oppinfo);  			break;  		case TYPE_BUSF_EXYNOS4x12: -			err = exynos4x12_set_busclk(data, opp); +			err = exynos4x12_set_busclk(data, &new_oppinfo);  			break;  		default:  			err = -EINVAL; @@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,  		if (err)  			goto unlock; -		data->curr_opp = opp; +		data->curr_oppinfo = new_oppinfo;  unlock:  		mutex_unlock(&data->lock);  		if (err) @@ -1027,13 +1063,17 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)  		}  	} +	rcu_read_lock();  	opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);  	if (IS_ERR(opp)) { +		rcu_read_unlock();  		dev_err(dev, "Invalid initial frequency %lu kHz.\n",  			exynos4_devfreq_profile.initial_freq);  		return PTR_ERR(opp);  	} -	data->curr_opp = opp; +	data->curr_oppinfo.rate = opp_get_freq(opp); +	data->curr_oppinfo.volt = opp_get_voltage(opp); +	rcu_read_unlock();  	platform_set_drvdata(pdev, data); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index dbf0e6f8de8..a7dcf78b1ff 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -684,9 +684,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,  			break;  		} -		imxdmac->hw_chaining = 1; -		if (!imxdma_hw_chain(imxdmac)) -			return -EINVAL; +		imxdmac->hw_chaining = 0; +  		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |  			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |  			CCR_REN; diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index e5fc944de1f..3e9d66920eb 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -951,7 +951,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)  			goto free_resources;  		}  	} -	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); +	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);  	/* skip validate if the capability is not present */  	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index c39e61bc817..3cad856fe67 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -266,6 +266,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(  		if (async_tx_test_ack(&dma_desc->txd)) {  			list_del(&dma_desc->node);  			spin_unlock_irqrestore(&tdc->lock, flags); +			dma_desc->txd.flags = 0;  			return dma_desc;  		}  	} @@ -1050,7 +1051,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(  					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;  	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; -	csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; +	csr |= TEGRA_APBDMA_CSR_FLOW; +	if (flags & DMA_PREP_INTERRUPT) +		csr |= TEGRA_APBDMA_CSR_IE_EOC;  	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;  	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1095,7 +1098,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(  		mem += len;  	}  	sg_req->last_sg = true; -	dma_desc->txd.flags = 0; +	if (flags & DMA_CTRL_ACK) +		dma_desc->txd.flags = DMA_CTRL_ACK;  	/*  	 * Make sure that mode should not be conflicting with currently diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 281f566a551..d1e9eb191f2 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,  	/*  	 * Alocate and fill the csrow/channels structs  	 */ -	mci->csrows = kcalloc(sizeof(*mci->csrows), 
tot_csrows, GFP_KERNEL); +	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);  	if (!mci->csrows)  		goto error;  	for (row = 0; row < tot_csrows; row++) { @@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,  		csr->csrow_idx = row;  		csr->mci = mci;  		csr->nr_channels = tot_channels; -		csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, +		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),  					GFP_KERNEL);  		if (!csr->channels)  			goto error; @@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,  	/*  	 * Allocate and fill the dimm structs  	 */ -	mci->dimms  = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); +	mci->dimms  = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);  	if (!mci->dimms)  		goto error; diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index dc6e905ee1a..0056c4dae9d 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,  	struct edac_pci_dev_attribute *edac_pci_dev;  	edac_pci_dev = (struct edac_pci_dev_attribute *)attr; -	if (edac_pci_dev->show) +	if (edac_pci_dev->store)  		return edac_pci_dev->store(edac_pci_dev->value, buffer, count);  	return -EIO;  } diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index fd3ae6290d7..982f1f5f574 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)  	char __iomem *p, *q;  	int rc; -	if (efi_enabled) { +	if (efi_enabled(EFI_CONFIG_TABLES)) {  		if (efi.smbios == EFI_INVALID_TABLE_ADDR)  			goto error; diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 7b1c37497c9..f5596db0cf5 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -674,7 +674,7 @@ static int efi_status_to_err(efi_status_t status)  		err = -EACCES;  		break;  	case EFI_NOT_FOUND: -		err = -ENOENT; +		err = -EIO;  		break;  	default:  		err = -EINVAL; @@ -793,6 +793,7 @@ static ssize_t efivarfs_file_write(struct file *file,  		spin_unlock(&efivars->lock);  		efivar_unregister(var);  		drop_nlink(inode); +		d_delete(file->f_dentry);  		dput(file->f_dentry);  	} else { @@ -994,7 +995,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)  		list_del(&var->list);  		spin_unlock(&efivars->lock);  		efivar_unregister(var); -		drop_nlink(dir); +		drop_nlink(dentry->d_inode);  		dput(dentry);  		return 0;  	} @@ -1782,7 +1783,7 @@ efivars_init(void)  	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,  	       EFIVARS_DATE); -	if (!efi_enabled) +	if (!efi_enabled(EFI_RUNTIME_SERVICES))  		return 0;  	/* For now we'll register the efi directory at /sys/firmware/efi */ @@ -1822,7 +1823,7 @@ err_put:  static void __exit  efivars_exit(void)  { -	if (efi_enabled) { +	if (efi_enabled(EFI_RUNTIME_SERVICES)) {  		unregister_efivars(&__efivars);  		kobject_put(efi_kobj);  	} diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 4da4eb9ae92..2224f1dc074 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)  	/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will  	 * only use ACPI for this */ -	if (!efi_enabled) +	if (!efi_enabled(EFI_BOOT))  		find_ibft_in_mem();  	if (ibft_addr) { diff --git a/drivers/gpio/gpio-mvebu.c 
b/drivers/gpio/gpio-mvebu.c index 7d9bd94be8d..6819d63cb16 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)  	mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);  	if (! mvchip->membase) {  		dev_err(&pdev->dev, "Cannot ioremap\n"); -		kfree(mvchip->chip.label);  		return -ENOMEM;  	} @@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)  		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);  		if (! res) {  			dev_err(&pdev->dev, "Cannot get memory resource\n"); -			kfree(mvchip->chip.label);  			return -ENODEV;  		}  		mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);  		if (! mvchip->percpu_membase) {  			dev_err(&pdev->dev, "Cannot ioremap\n"); -			kfree(mvchip->chip.label);  			return -ENOMEM;  		}  	} @@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)  	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);  	if (mvchip->irqbase < 0) {  		dev_err(&pdev->dev, "no irqs\n"); -		kfree(mvchip->chip.label);  		return -ENOMEM;  	} @@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)  				    mvchip->membase, handle_level_irq);  	if (! gc) {  		dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); -		kfree(mvchip->chip.label);  		return -ENOMEM;  	} @@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)  		irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,  					IRQ_LEVEL | IRQ_NOPROBE);  		kfree(gc); -		kfree(mvchip->chip.label);  		return -ENODEV;  	} diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 01f7fe95559..76be7eed79d 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -32,7 +32,6 @@  #include <mach/hardware.h>  #include <mach/map.h> -#include <mach/regs-clock.h>  #include <mach/regs-gpio.h>  #include <plat/cpu.h> @@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {  };  #endif -#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) +#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)  static struct samsung_gpio_cfg exynos_gpio_cfg = {  	.set_pull	= exynos_gpio_setpull,  	.get_pull	= exynos_gpio_getpull, @@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {  };  #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250  static struct samsung_gpio_chip exynos5_gpios_1[] = {  	{  		.chip	= { @@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {  };  #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250  static struct samsung_gpio_chip exynos5_gpios_2[] = {  	{  		.chip	= { @@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {  };  #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250  static struct samsung_gpio_chip exynos5_gpios_3[] = {  	{  		.chip	= { @@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {  };  #endif -#ifdef CONFIG_ARCH_EXYNOS5 +#ifdef CONFIG_SOC_EXYNOS5250  static struct samsung_gpio_chip exynos5_gpios_4[] = {  	{  		.chip	= { @@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void)  	int i, nr_chips;  	int group = 0; -#ifdef CONFIG_PINCTRL_SAMSUNG +#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)  	/*  	* This gpio driver includes support for device tree support and there  	* are platforms using it. 
In order to maintain compatibility with those @@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void)  	static const struct of_device_id exynos_pinctrl_ids[] = {  		{ .compatible = "samsung,pinctrl-exynos4210", },  		{ .compatible = "samsung,pinctrl-exynos4x12", }, +		{ .compatible = "samsung,pinctrl-exynos5440", },  	};  	for_each_matching_node(pctrl_np, exynos_pinctrl_ids)  		if (pctrl_np && of_device_is_available(pctrl_np)) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1d1f1e5e33f..046bcda36ab 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -24,7 +24,7 @@ config DRM_EXYNOS_DMABUF  config DRM_EXYNOS_FIMD  	bool "Exynos DRM FIMD" -	depends on DRM_EXYNOS && !FB_S3C +	depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM  	help  	  Choose this option if you want to use Exynos FIMD for DRM. @@ -48,7 +48,7 @@ config DRM_EXYNOS_G2D  config DRM_EXYNOS_IPP  	bool "Exynos DRM IPP" -	depends on DRM_EXYNOS +	depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM  	help  	  Choose this option if you want to use IPP feature for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index ab37437bad8..4c5b6859c9e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -18,7 +18,6 @@  #include "exynos_drm_drv.h"  #include "exynos_drm_encoder.h" -#define MAX_EDID 256  #define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\  				drm_connector) @@ -96,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)  					to_exynos_connector(connector);  	struct exynos_drm_manager *manager = exynos_connector->manager;  	struct exynos_drm_display_ops *display_ops = manager->display_ops; -	unsigned int count; +	struct edid *edid = NULL; +	unsigned int count = 0; +	int ret;  	DRM_DEBUG_KMS("%s\n", __FILE__); @@ -114,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)  	 * because lcd panel has only one mode.  	 
*/  	if (display_ops->get_edid) { -		int ret; -		void *edid; - -		edid = kzalloc(MAX_EDID, GFP_KERNEL); -		if (!edid) { -			DRM_ERROR("failed to allocate edid\n"); -			return 0; +		edid = display_ops->get_edid(manager->dev, connector); +		if (IS_ERR_OR_NULL(edid)) { +			ret = PTR_ERR(edid); +			edid = NULL; +			DRM_ERROR("Panel operation get_edid failed %d\n", ret); +			goto out;  		} -		ret = display_ops->get_edid(manager->dev, connector, -						edid, MAX_EDID); -		if (ret < 0) { -			DRM_ERROR("failed to get edid data.\n"); -			kfree(edid); -			edid = NULL; -			return 0; +		count = drm_add_edid_modes(connector, edid); +		if (count < 0) { +			DRM_ERROR("Add edid modes failed %d\n", count); +			goto out;  		}  		drm_mode_connector_update_edid_property(connector, edid); -		count = drm_add_edid_modes(connector, edid); -		kfree(edid);  	} else {  		struct exynos_drm_panel_info *panel;  		struct drm_display_mode *mode = drm_mode_create(connector->dev); @@ -161,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)  		count = 1;  	} +out: +	kfree(edid);  	return count;  } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 9df97714b6c..ba0a3aa7854 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -19,6 +19,7 @@  struct exynos_drm_dmabuf_attachment {  	struct sg_table sgt;  	enum dma_data_direction dir; +	bool is_mapped;  };  static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, @@ -72,17 +73,10 @@ static struct sg_table *  	DRM_DEBUG_PRIME("%s\n", __FILE__); -	if (WARN_ON(dir == DMA_NONE)) -		return ERR_PTR(-EINVAL); -  	/* just return current sgt if already requested. */ -	if (exynos_attach->dir == dir) +	if (exynos_attach->dir == dir && exynos_attach->is_mapped)  		return &exynos_attach->sgt; -	/* reattaching is not allowed. 
*/ -	if (WARN_ON(exynos_attach->dir != DMA_NONE)) -		return ERR_PTR(-EBUSY); -  	buf = gem_obj->buffer;  	if (!buf) {  		DRM_ERROR("buffer is null.\n"); @@ -107,13 +101,17 @@ static struct sg_table *  		wr = sg_next(wr);  	} -	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); -	if (!nents) { -		DRM_ERROR("failed to map sgl with iommu.\n"); -		sgt = ERR_PTR(-EIO); -		goto err_unlock; +	if (dir != DMA_NONE) { +		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); +		if (!nents) { +			DRM_ERROR("failed to map sgl with iommu.\n"); +			sg_free_table(sgt); +			sgt = ERR_PTR(-EIO); +			goto err_unlock; +		}  	} +	exynos_attach->is_mapped = true;  	exynos_attach->dir = dir;  	attach->priv = exynos_attach; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b9e51bc09e8..4606fac7241 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -148,8 +148,8 @@ struct exynos_drm_overlay {  struct exynos_drm_display_ops {  	enum exynos_drm_output_type type;  	bool (*is_connected)(struct device *dev); -	int (*get_edid)(struct device *dev, struct drm_connector *connector, -				u8 *edid, int len); +	struct edid *(*get_edid)(struct device *dev, +			struct drm_connector *connector);  	void *(*get_panel)(struct device *dev);  	int (*check_timing)(struct device *dev, void *timing);  	int (*power_on)(struct device *dev, int mode); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 36c3905536a..9a4c08e7453 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -324,7 +324,7 @@ out:  	g2d_userptr = NULL;  } -dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, +static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,  					unsigned long userptr,  					unsigned long size,  					struct drm_file *filp, diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 850e9950b7d..28644539b30 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -108,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)  	return false;  } -static int drm_hdmi_get_edid(struct device *dev, -		struct drm_connector *connector, u8 *edid, int len) +static struct edid *drm_hdmi_get_edid(struct device *dev, +			struct drm_connector *connector)  {  	struct drm_hdmi_context *ctx = to_context(dev);  	DRM_DEBUG_KMS("%s\n", __FILE__);  	if (hdmi_ops && hdmi_ops->get_edid) -		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid, -					  len); +		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector); -	return 0; +	return NULL;  }  static int drm_hdmi_check_timing(struct device *dev, void *timing) diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 784a7e9a766..d80516fc9ed 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h @@ -30,8 +30,8 @@ struct exynos_drm_hdmi_context {  struct exynos_hdmi_ops {  	/* display */  	bool (*is_connected)(void *ctx); -	int (*get_edid)(void *ctx, struct drm_connector *connector, -			u8 *edid, int len); +	struct edid *(*get_edid)(void *ctx, +			struct drm_connector *connector);  	int (*check_timing)(void *ctx, void *timing);  	int (*power_on)(void *ctx, int mode); diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 
0bda96454a0..1a556354e92 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -869,7 +869,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,  	}  } -void ipp_handle_cmd_work(struct device *dev, +static void ipp_handle_cmd_work(struct device *dev,  		struct exynos_drm_ippdrv *ippdrv,  		struct drm_exynos_ipp_cmd_work *cmd_work,  		struct drm_exynos_ipp_cmd_node *c_node) diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index e9e83ef688f..f976e29def6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -734,7 +734,7 @@ static int rotator_remove(struct platform_device *pdev)  	return 0;  } -struct rot_limit_table rot_limit_tbl = { +static struct rot_limit_table rot_limit_tbl = {  	.ycbcr420_2p = {  		.min_w = 32,  		.min_h = 32, @@ -751,7 +751,7 @@ struct rot_limit_table rot_limit_tbl = {  	},  }; -struct platform_device_id rotator_driver_ids[] = { +static struct platform_device_id rotator_driver_ids[] = {  	{  		.name		= "exynos-rot",  		.driver_data	= (unsigned long)&rot_limit_tbl, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index d0ca3c4e06c..13ccbd4bcfa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -98,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)  	return ctx->connected ? true : false;  } -static int vidi_get_edid(struct device *dev, struct drm_connector *connector, -				u8 *edid, int len) +static struct edid *vidi_get_edid(struct device *dev, +			struct drm_connector *connector)  {  	struct vidi_context *ctx = get_vidi_context(dev); +	struct edid *edid; +	int edid_len;  	DRM_DEBUG_KMS("%s\n", __FILE__); @@ -111,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,  	 */  	if (!ctx->raw_edid) {  		DRM_DEBUG_KMS("raw_edid is null.\n"); -		return -EFAULT; +		return ERR_PTR(-EFAULT);  	} -	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) -					* EDID_LENGTH, len)); +	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; +	edid = kzalloc(edid_len, GFP_KERNEL); +	if (!edid) { +		DRM_DEBUG_KMS("failed to allocate edid\n"); +		return ERR_PTR(-ENOMEM); +	} -	return 0; +	memcpy(edid, ctx->raw_edid, edid_len); +	return edid;  }  static void *vidi_get_panel(struct device *dev) @@ -514,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,  	struct exynos_drm_manager *manager;  	struct exynos_drm_display_ops *display_ops;  	struct drm_exynos_vidi_connection *vidi = data; -	struct edid *raw_edid;  	int edid_len;  	DRM_DEBUG_KMS("%s\n", __FILE__); @@ -551,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,  	}  	if (vidi->connection) { -		if (!vidi->edid) { -			DRM_DEBUG_KMS("edid data is null.\n"); +		struct edid *raw_edid  = (struct edid *)(uint32_t)vidi->edid; +		if (!drm_edid_is_valid(raw_edid)) { +			DRM_DEBUG_KMS("edid data is invalid.\n");  			return -EINVAL;  		} -		raw_edid = (struct edid *)(uint32_t)vidi->edid;  		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;  		ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);  		if (!ctx->raw_edid) { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 41ff79d8ac8..fbab3c46860 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -34,7 +34,6 @@  #include 
<linux/regulator/consumer.h>  #include <linux/io.h>  #include <linux/of_gpio.h> -#include <plat/gpio-cfg.h>  #include <drm/exynos_drm.h> @@ -98,8 +97,7 @@ struct hdmi_context {  	void __iomem			*regs;  	void				*parent_ctx; -	int				external_irq; -	int				internal_irq; +	int				irq;  	struct i2c_client		*ddc_port;  	struct i2c_client		*hdmiphy_port; @@ -1391,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)  	return hdata->hpd;  } -static int hdmi_get_edid(void *ctx, struct drm_connector *connector, -				u8 *edid, int len) +static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)  {  	struct edid *raw_edid;  	struct hdmi_context *hdata = ctx; @@ -1400,22 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,  	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);  	if (!hdata->ddc_port) -		return -ENODEV; +		return ERR_PTR(-ENODEV);  	raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); -	if (raw_edid) { -		hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); -		memcpy(edid, raw_edid, min((1 + raw_edid->extensions) -					* EDID_LENGTH, len)); -		DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", -			(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), -			raw_edid->width_cm, raw_edid->height_cm); -		kfree(raw_edid); -	} else { -		return -ENODEV; -	} +	if (!raw_edid) +		return ERR_PTR(-ENODEV); -	return 0; +	hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); +	DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", +		(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), +		raw_edid->width_cm, raw_edid->height_cm); + +	return raw_edid;  }  static int hdmi_v13_check_timing(struct fb_videomode *check_timing) @@ -1652,16 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)  	/* resetting HDMI core */  	hdmi_reg_writemask(hdata, reg,  0, HDMI_CORE_SW_RSTOUT); -	mdelay(10); +	usleep_range(10000, 12000);  	hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); -	mdelay(10); +	usleep_range(10000, 12000);  }  static void hdmi_conf_init(struct hdmi_context *hdata)  {  	struct hdmi_infoframe infoframe; -	/* disable HPD interrupts */ +	/* disable HPD interrupts from HDMI IP block, use GPIO instead */  	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |  		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); @@ -1779,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)  		u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);  		if (val & HDMI_PHY_STATUS_READY)  			break; -		mdelay(1); +		usleep_range(1000, 2000);  	}  	/* steady state not achieved */  	if (tries == 0) { @@ -1946,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)  		u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);  		if (val & HDMI_PHY_STATUS_READY)  			break; -		mdelay(1); +		usleep_range(1000, 2000);  	}  	/* steady state not achieved */  	if (tries == 0) { @@ -1998,9 +1991,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)  	/* reset hdmiphy */  	hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); -	mdelay(10); +	usleep_range(10000, 12000);  	hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT); -	mdelay(10); +	usleep_range(10000, 12000);  }  static void hdmiphy_poweron(struct hdmi_context *hdata) @@ -2048,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)  		return;  	} -	mdelay(10); +	usleep_range(10000, 12000);  	/* operation mode */  	operation[0] = 0x1f; @@ -2170,6 +2163,13 @@ static void hdmi_commit(void *ctx)  	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); +	
mutex_lock(&hdata->hdmi_mutex); +	if (!hdata->powered) { +		mutex_unlock(&hdata->hdmi_mutex); +		return; +	} +	mutex_unlock(&hdata->hdmi_mutex); +  	hdmi_conf_apply(hdata);  } @@ -2265,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {  	.dpms		= hdmi_dpms,  }; -static irqreturn_t hdmi_external_irq_thread(int irq, void *arg) +static irqreturn_t hdmi_irq_thread(int irq, void *arg)  {  	struct exynos_drm_hdmi_context *ctx = arg;  	struct hdmi_context *hdata = ctx->ctx; @@ -2280,31 +2280,6 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)  	return IRQ_HANDLED;  } -static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg) -{ -	struct exynos_drm_hdmi_context *ctx = arg; -	struct hdmi_context *hdata = ctx->ctx; -	u32 intc_flag; - -	intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); -	/* clearing flags for HPD plug/unplug */ -	if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) { -		DRM_DEBUG_KMS("unplugged\n"); -		hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, -			HDMI_INTC_FLAG_HPD_UNPLUG); -	} -	if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) { -		DRM_DEBUG_KMS("plugged\n"); -		hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, -			HDMI_INTC_FLAG_HPD_PLUG); -	} - -	if (ctx->drm_dev) -		drm_helper_hpd_irq_event(ctx->drm_dev); - -	return IRQ_HANDLED; -} -  static int hdmi_resources_init(struct hdmi_context *hdata)  {  	struct device *dev = hdata->dev; @@ -2555,39 +2530,24 @@ static int hdmi_probe(struct platform_device *pdev)  	hdata->hdmiphy_port = hdmi_hdmiphy; -	hdata->external_irq = gpio_to_irq(hdata->hpd_gpio); -	if (hdata->external_irq < 0) { -		DRM_ERROR("failed to get GPIO external irq\n"); -		ret = hdata->external_irq; -		goto err_hdmiphy; -	} - -	hdata->internal_irq = platform_get_irq(pdev, 0); -	if (hdata->internal_irq < 0) { -		DRM_ERROR("failed to get platform internal irq\n"); -		ret = hdata->internal_irq; +	hdata->irq = gpio_to_irq(hdata->hpd_gpio); +	if (hdata->irq < 0) { +		DRM_ERROR("failed to get GPIO irq\n"); +		ret = hdata->irq;  		goto err_hdmiphy;  	}  	hdata->hpd = gpio_get_value(hdata->hpd_gpio); -	ret = request_threaded_irq(hdata->external_irq, NULL, -			hdmi_external_irq_thread, IRQF_TRIGGER_RISING | +	ret = request_threaded_irq(hdata->irq, NULL, +			hdmi_irq_thread, IRQF_TRIGGER_RISING |  			IRQF_TRIGGER_FALLING | IRQF_ONESHOT, -			"hdmi_external", drm_hdmi_ctx); +			"hdmi", drm_hdmi_ctx);  	if (ret) { -		DRM_ERROR("failed to register hdmi external interrupt\n"); +		DRM_ERROR("failed to register hdmi interrupt\n");  		goto err_hdmiphy;  	} -	ret = request_threaded_irq(hdata->internal_irq, NULL, -			hdmi_internal_irq_thread, IRQF_ONESHOT, -			"hdmi_internal", drm_hdmi_ctx); -	if (ret) { -		DRM_ERROR("failed to register hdmi internal interrupt\n"); -		goto err_free_irq; -	} -  	/* Attach HDMI Driver to common hdmi. 
*/  	exynos_hdmi_drv_attach(drm_hdmi_ctx); @@ -2598,8 +2558,6 @@ static int hdmi_probe(struct platform_device *pdev)  	return 0; -err_free_irq: -	free_irq(hdata->external_irq, drm_hdmi_ctx);  err_hdmiphy:  	i2c_del_driver(&hdmiphy_driver);  err_ddc: @@ -2617,8 +2575,7 @@ static int hdmi_remove(struct platform_device *pdev)  	pm_runtime_disable(dev); -	free_irq(hdata->internal_irq, hdata); -	free_irq(hdata->external_irq, hdata); +	free_irq(hdata->irq, hdata);  	/* hdmiphy i2c driver */ @@ -2637,8 +2594,7 @@ static int hdmi_suspend(struct device *dev)  	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); -	disable_irq(hdata->internal_irq); -	disable_irq(hdata->external_irq); +	disable_irq(hdata->irq);  	hdata->hpd = false;  	if (ctx->drm_dev) @@ -2663,8 +2619,7 @@ static int hdmi_resume(struct device *dev)  	hdata->hpd = gpio_get_value(hdata->hpd_gpio); -	enable_irq(hdata->external_irq); -	enable_irq(hdata->internal_irq); +	enable_irq(hdata->irq);  	if (!pm_runtime_suspended(dev)) {  		DRM_DEBUG_KMS("%s : Already resumed\n", __func__); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index c187ea33b74..c414584bfba 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -600,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)  		/* waiting until VP_SRESET_PROCESSING is 0 */  		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)  			break; -		mdelay(10); +		usleep_range(10000, 12000);  	}  	WARN(tries == 0, "failed to reset Video Processor\n");  } @@ -776,6 +776,13 @@ static void mixer_win_commit(void *ctx, int win)  	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); +	mutex_lock(&mixer_ctx->mixer_mutex); +	if (!mixer_ctx->powered) { +		mutex_unlock(&mixer_ctx->mixer_mutex); +		return; +	} +	mutex_unlock(&mixer_ctx->mixer_mutex); +  	if (win > 1 && mixer_ctx->vp_enabled)  		vp_video_buffer(mixer_ctx, win);  	else diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e6a11ca85ea..9d4a2c2adf0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,6 +30,7 @@  #include <linux/debugfs.h>  #include <linux/slab.h>  #include <linux/export.h> +#include <generated/utsrelease.h>  #include <drm/drmP.h>  #include "intel_drv.h"  #include "intel_ringbuffer.h" @@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,  	seq_printf(m, "%s command stream:\n", ring_str(ring));  	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);  	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]); +	seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);  	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);  	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);  	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]); @@ -689,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)  	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,  		   error->time.tv_usec); +	seq_printf(m, "Kernel: " UTS_RELEASE);  	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);  	seq_printf(m, "EIR: 0x%08x\n", error->eir);  	seq_printf(m, "IER: 0x%08x\n", error->ier);  	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); +	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); +	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);  	seq_printf(m, "CCID: 0x%08x\n", error->ccid);  	for (i = 0; i < dev_priv->num_fence_regs; i++) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 
ed305957557..12ab3bdea54 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -188,10 +188,13 @@ struct drm_i915_error_state {  	u32 pgtbl_er;  	u32 ier;  	u32 ccid; +	u32 derrmr; +	u32 forcewake;  	bool waiting[I915_NUM_RINGS];  	u32 pipestat[I915_MAX_PIPES];  	u32 tail[I915_NUM_RINGS];  	u32 head[I915_NUM_RINGS]; +	u32 ctl[I915_NUM_RINGS];  	u32 ipeir[I915_NUM_RINGS];  	u32 ipehr[I915_NUM_RINGS];  	u32 instdone[I915_NUM_RINGS]; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d6a994a0739..26d08bb5821 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,  	total = 0;  	for (i = 0; i < count; i++) {  		struct drm_i915_gem_relocation_entry __user *user_relocs; +		u64 invalid_offset = (u64)-1; +		int j;  		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; @@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,  			goto err;  		} +		/* As we do not update the known relocation offsets after +		 * relocating (due to the complexities in lock handling), +		 * we need to mark them as invalid now so that we force the +		 * relocation processing next time. Just in case the target +		 * object is evicted and then rebound into its old +		 * presumed_offset before the next execbuffer - if that +		 * happened we would make the mistake of assuming that the +		 * relocations were valid. +		 */ +		for (j = 0; j < exec[i].relocation_count; j++) { +			if (copy_to_user(&user_relocs[j].presumed_offset, +					 &invalid_offset, +					 sizeof(invalid_offset))) { +				ret = -EFAULT; +				mutex_lock(&dev->struct_mutex); +				goto err; +			} +		} +  		reloc_offset[i] = total;  		total += exec[i].relocation_count;  	} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2220dec3e5d..fe843389c7b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,  	error->acthd[ring->id] = intel_ring_get_active_head(ring);  	error->head[ring->id] = I915_READ_HEAD(ring);  	error->tail[ring->id] = I915_READ_TAIL(ring); +	error->ctl[ring->id] = I915_READ_CTL(ring);  	error->cpu_ring_head[ring->id] = ring->head;  	error->cpu_ring_tail[ring->id] = ring->tail; @@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)  	else  		error->ier = I915_READ(IER); +	if (INTEL_INFO(dev)->gen >= 6) +		error->derrmr = I915_READ(DERRMR); + +	if (IS_VALLEYVIEW(dev)) +		error->forcewake = I915_READ(FORCEWAKE_VLV); +	else if (INTEL_INFO(dev)->gen >= 7) +		error->forcewake = I915_READ(FORCEWAKE_MT); +	else if (INTEL_INFO(dev)->gen == 6) +		error->forcewake = I915_READ(FORCEWAKE); +  	for_each_pipe(pipe)  		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 186ee5c85b5..59afb7eb6db 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -512,6 +512,8 @@  #define GEN7_ERR_INT	0x44040  #define   ERR_INT_MMIO_UNCLAIMED (1<<13) +#define DERRMR		0x44050 +  /* GM45+ chicken bits -- debug workaround bits that may be required   * for various sorts of correct behavior.  The top 16 bits of each are   * the enables for writing to the corresponding low bit. 
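
The i915_gem_execbuffer_relocate_slow() hunk above protects the retry path by writing an invalid sentinel into every relocation's presumed_offset before the relocations are re-read, so a target object that was evicted and later rebound at its old address cannot be mistaken for one that never moved. Below is a minimal user-space sketch of that invalidate-then-revalidate pattern; the reloc/object types and the INVALID_OFFSET name are illustrative stand-ins, not the real i915 structures.

#include <stdint.h>
#include <stdio.h>

#define INVALID_OFFSET ((uint64_t)-1)	/* sentinel: "must be re-relocated" */

struct reloc {
	uint64_t presumed_offset;	/* where we last believed the target was */
};

struct object {
	uint64_t offset;		/* where the target actually is now */
};

/* Before retrying, poison every cached offset so nothing stale survives. */
static void invalidate_relocs(struct reloc *r, int n)
{
	for (int i = 0; i < n; i++)
		r[i].presumed_offset = INVALID_OFFSET;
}

/* A relocation only needs patching if the cached offset is stale. */
static int needs_reloc(const struct reloc *r, const struct object *target)
{
	return r->presumed_offset != target->offset;
}

int main(void)
{
	struct object target = { .offset = 0x1000 };
	struct reloc r = { .presumed_offset = 0x1000 };

	printf("before invalidate: needs_reloc=%d\n", needs_reloc(&r, &target));
	invalidate_relocs(&r, 1);
	printf("after invalidate:  needs_reloc=%d\n", needs_reloc(&r, &target));
	return 0;
}

After invalidation every relocation reports as stale, which is exactly the conservative behaviour the comment in the hunk asks for.
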
@@ -531,6 +533,7 @@  #define MI_MODE		0x0209c  # define VS_TIMER_DISPATCH				(1 << 6)  # define MI_FLUSH_ENABLE				(1 << 12) +# define ASYNC_FLIP_PERF_DISABLE			(1 << 14)  #define GEN6_GT_MODE	0x20d0  #define   GEN6_GT_MODE_HI				(1 << 9) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1b63d55318a..fb3715b4b09 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect  static void  intel_dp_init_panel_power_sequencer(struct drm_device *dev, -				    struct intel_dp *intel_dp) +				    struct intel_dp *intel_dp, +				    struct edp_power_seq *out)  {  	struct drm_i915_private *dev_priv = dev->dev_private;  	struct edp_power_seq cur, vbt, spec, final; @@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,  	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);  #undef get_delay +	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", +		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, +		      intel_dp->panel_power_cycle_delay); + +	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", +		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + +	if (out) +		*out = final; +} + +static void +intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, +					      struct intel_dp *intel_dp, +					      struct edp_power_seq *seq) +{ +	struct drm_i915_private *dev_priv = dev->dev_private; +	u32 pp_on, pp_off, pp_div; +  	/* And finally store the new values in the power sequencer. */ -	pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | -		(final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); -	pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | -		 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); +	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | +		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); +	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | +		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);  	/* Compute the divisor for the pp clock, simply match the Bspec  	 * formula. 
*/  	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)  			<< PP_REFERENCE_DIVIDER_SHIFT; -	pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) +	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)  			<< PANEL_POWER_CYCLE_DELAY_SHIFT);  	/* Haswell doesn't have any port selection bits for the panel @@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,  	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);  	I915_WRITE(PCH_PP_DIVISOR, pp_div); - -	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", -		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, -		      intel_dp->panel_power_cycle_delay); - -	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", -		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); -  	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",  		      I915_READ(PCH_PP_ON_DELAYS),  		      I915_READ(PCH_PP_OFF_DELAYS), @@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,  	struct drm_device *dev = intel_encoder->base.dev;  	struct drm_i915_private *dev_priv = dev->dev_private;  	struct drm_display_mode *fixed_mode = NULL; +	struct edp_power_seq power_seq = { 0 };  	enum port port = intel_dig_port->port;  	const char *name = NULL;  	int type; @@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,  	}  	if (is_edp(intel_dp)) -		intel_dp_init_panel_power_sequencer(dev, intel_dp); +		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);  	intel_dp_i2c_init(intel_dp, intel_connector, name); @@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,  			return;  		} +		/* We now know it's not a ghost, init power sequence regs. 
*/ +		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, +							      &power_seq); +  		ironlake_edp_panel_vdd_on(intel_dp);  		edid = drm_get_edid(connector, &intel_dp->adapter);  		if (edid) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e83a1179417..3280cffe50f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4250,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)  static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); -	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ +	/* something from same cacheline, but !FORCEWAKE_MT */ +	POSTING_READ(ECOBUS);  }  static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) @@ -4267,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)  		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");  	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); -	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ +	/* something from same cacheline, but !FORCEWAKE_MT */ +	POSTING_READ(ECOBUS);  	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),  			    FORCEWAKE_ACK_TIMEOUT_MS)) @@ -4304,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)  static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE, 0); -	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */ +	/* something from same cacheline, but !FORCEWAKE */ +	POSTING_READ(ECOBUS);  	gen6_gt_check_fifodbg(dev_priv);  }  static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); -	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */ +	/* something from same cacheline, but !FORCEWAKE_MT */ +	POSTING_READ(ECOBUS);  	gen6_gt_check_fifodbg(dev_priv);  } @@ -4351,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)  static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); +	/* something from same cacheline, but !FORCEWAKE_VLV */ +	POSTING_READ(FORCEWAKE_ACK_VLV);  }  static void vlv_force_wake_get(struct drm_i915_private *dev_priv) @@ -4371,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)  static void vlv_force_wake_put(struct drm_i915_private *dev_priv)  {  	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); -	/* The below doubles as a POSTING_READ */ +	/* something from same cacheline, but !FORCEWAKE_VLV */ +	POSTING_READ(FORCEWAKE_ACK_VLV);  	gen6_gt_check_fifodbg(dev_priv);  } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ae253e04c39..42ff97d667d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)  	struct drm_i915_private *dev_priv = dev->dev_private;  	int ret = init_ring_common(ring); -	if (INTEL_INFO(dev)->gen > 3) { +	if (INTEL_INFO(dev)->gen > 3)  		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); -		if (IS_GEN7(dev)) -			I915_WRITE(GFX_MODE_GEN7, -				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | -				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); -	} + 
+	/* We need to disable the AsyncFlip performance optimisations in order +	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be +	 * programmed to '1' on all products. +	 */ +	if (INTEL_INFO(dev)->gen >= 6) +		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + +	/* Required for the hardware to program scanline values for waiting */ +	if (INTEL_INFO(dev)->gen == 6) +		I915_WRITE(GFX_MODE, +			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); + +	if (IS_GEN7(dev)) +		I915_WRITE(GFX_MODE_GEN7, +			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | +			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));  	if (INTEL_INFO(dev)->gen >= 5) {  		ret = init_pipe_control(ring); diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c index 6b0843c3387..e05c1577758 100644 --- a/drivers/gpu/drm/nouveau/core/core/falcon.c +++ b/drivers/gpu/drm/nouveau/core/core/falcon.c @@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)  	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);  	/* wait for 'uc halted' to be signalled before continuing */ -	if (falcon->secret) { -		nv_wait(falcon, 0x008, 0x00000010, 0x00000010); +	if (falcon->secret && falcon->version < 4) { +		if (!falcon->version) +			nv_wait(falcon, 0x008, 0x00000010, 0x00000010); +		else +			nv_wait(falcon, 0x180, 0x80000000, 0);  		nv_wo32(falcon, 0x004, 0x00000010);  	} diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index f74c30aa33a..48f06378d3f 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c @@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,  	if (ret)  		return ret; -	mutex_init(&subdev->mutex); +	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);  	subdev->name = subname;  	if (parent) { diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 5982935ee23..106bb19fdd9 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -50,10 +50,13 @@ int  nouveau_object_fini(struct nouveau_object *, bool suspend);  extern struct nouveau_ofuncs nouveau_object_ofuncs; +/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in + * ".data". */  struct nouveau_oclass {  	u32 handle; -	struct nouveau_ofuncs *ofuncs; -	struct nouveau_omthds *omthds; +	struct nouveau_ofuncs * const ofuncs; +	struct nouveau_omthds * const omthds; +	struct lock_class_key lock_class_key;  };  #define nv_oclass(o)    nv_object(o)->oclass diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1..d62045f454b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c @@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)  			return ret;  	} -	if (!nouveau_mm_initialised(&pfb->tags) && tags) { -		ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); +	if (!nouveau_mm_initialised(&pfb->tags)) { +		ret = nouveau_mm_init(&pfb->tags, 0, tags ? 
++tags : 0, 1);  		if (ret)  			return ret;  	} diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c20..eac236ed19b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)  	struct nouveau_bios *bios = nouveau_bios(device);  	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */  	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ -	u32 size; +	u32 size, tags = 0;  	int ret;  	pfb->ram.size = nv_rd32(pfb, 0x10020c); @@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)  			return ret;  		pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; +		tags = nv_rd32(pfb, 0x100320);  		break;  	} -	return nv_rd32(pfb, 0x100320); +	return tags;  }  static int diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 69d7b1d0b9d..1699a9083a2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -28,6 +28,7 @@   */  #include <core/engine.h> +#include <linux/swiotlb.h>  #include <subdev/fb.h>  #include <subdev/vm.h> diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8b090f1eb51..5e7aef23825 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,  	return 0;  } +static struct lock_class_key drm_client_lock_class_key; +  static int  nouveau_drm_load(struct drm_device *dev, unsigned long flags)  { @@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)  	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);  	if (ret)  		return ret; +	lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);  	dev->dev_private = drm;  	drm->dev = dev; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 061fa0a2890..a2d478e8692 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav  				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {  					radeon_wait_for_vblank(rdev, i);  					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  				}  			} else {  				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);  				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {  					radeon_wait_for_vblank(rdev, i);  					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); +					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  				}  			}  			/* wait for the next frame */ @@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav  		blackout &= ~BLACKOUT_MODE_MASK;  		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);  	} +	/* wait for the MC to settle */ +	udelay(100);  }  void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s  			if (ASIC_IS_DCE6(rdev)) {  				tmp = 
RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);  				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  			} else {  				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);  				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);  				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); +				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);  			}  			/* wait for the next frame */  			frame_count = radeon_get_vblank_counter(rdev, i); @@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)  	WREG32(HDP_ADDR_CONFIG, gb_addr_config);  	WREG32(DMA_TILING_CONFIG, gb_addr_config); -	tmp = gb_addr_config & NUM_PIPES_MASK; -	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, -					EVERGREEN_MAX_BACKENDS, disabled_rb_mask); +	if ((rdev->config.evergreen.max_backends == 1) && +	    (rdev->flags & RADEON_IS_IGP)) { +		if ((disabled_rb_mask & 3) == 1) { +			/* RB0 disabled, RB1 enabled */ +			tmp = 0x11111111; +		} else { +			/* RB1 disabled, RB0 enabled */ +			tmp = 0x00000000; +		} +	} else { +		tmp = gb_addr_config & NUM_PIPES_MASK; +		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, +						EVERGREEN_MAX_BACKENDS, disabled_rb_mask); +	}  	WREG32(GB_BACKEND_MAP, tmp);  	WREG32(CGTS_SYS_TCC_DISABLE, 0); @@ -2401,6 +2422,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)  {  	struct evergreen_mc_save save; +	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) +		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + +	if (RREG32(DMA_STATUS_REG) & DMA_IDLE) +		reset_mask &= ~RADEON_RESET_DMA; +  	if (reset_mask == 0)  		return 0; diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7a445666e71..ee4cff534f1 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  				return -EINVAL;  			}  			if (tiled) { -				dst_offset = ib[idx+1]; +				dst_offset = radeon_get_ib_value(p, idx+1);  				dst_offset <<= 8;  				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);  				p->idx += count + 7;  			} else { -				dst_offset = ib[idx+1]; -				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; +				dst_offset = radeon_get_ib_value(p, idx+1); +				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;  				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; @@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");  							return -EINVAL;  						} -						dst_offset = ib[idx+1]; +						dst_offset = radeon_get_ib_value(p, idx+1);  						dst_offset <<= 8; -						dst2_offset = ib[idx+2]; +						dst2_offset = radeon_get_ib_value(p, idx+2);  						dst2_offset <<= 8; -						src_offset = ib[idx+8]; -						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; +						src_offset = radeon_get_ib_value(p, idx+8); +						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;  						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {  							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", 
 								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");  							return -EINVAL;  						} -						dst_offset = ib[idx+1]; +						dst_offset = radeon_get_ib_value(p, idx+1);  						dst_offset <<= 8; -						dst2_offset = ib[idx+2]; +						dst2_offset = radeon_get_ib_value(p, idx+2);  						dst2_offset <<= 8; -						src_offset = ib[idx+8]; -						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; +						src_offset = radeon_get_ib_value(p, idx+8); +						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;  						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {  							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",  								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  						/* detile bit */  						if (idx_value & (1 << 31)) {  							/* tiled src, linear dst */ -							src_offset = ib[idx+1]; +							src_offset = radeon_get_ib_value(p, idx+1);  							src_offset <<= 8;  							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); -							dst_offset = ib[idx+7]; -							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; +							dst_offset = radeon_get_ib_value(p, idx+7); +							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;  							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;  						} else {  							/* linear src, tiled dst */ -							src_offset = ib[idx+7]; -							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; +							src_offset = radeon_get_ib_value(p, idx+7); +							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;  							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);  							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; -							dst_offset = ib[idx+1]; +							dst_offset = radeon_get_ib_value(p, idx+1);  							dst_offset <<= 8;  							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);  						} @@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");  							return -EINVAL;  						} -						dst_offset = ib[idx+1]; +						dst_offset = radeon_get_ib_value(p, idx+1);  						dst_offset <<= 8; -						dst2_offset = ib[idx+2]; +						dst2_offset = radeon_get_ib_value(p, idx+2);  						dst2_offset <<= 8; -						src_offset = ib[idx+8]; -						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; +						src_offset = radeon_get_ib_value(p, idx+8); +						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;  						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {  							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",  								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  						/* detile bit */  						if (idx_value & (1 << 31)) {  							/* tiled src, linear dst */ -							src_offset = ib[idx+1]; +							src_offset = radeon_get_ib_value(p, idx+1);  							src_offset <<= 8;  							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); -							dst_offset = ib[idx+7]; -							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; +							dst_offset = radeon_get_ib_value(p, idx+7); +							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 
0xff)) << 32;  							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;  						} else {  							/* linear src, tiled dst */ -							src_offset = ib[idx+7]; -							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; +							src_offset = radeon_get_ib_value(p, idx+7); +							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;  							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);  							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; -							dst_offset = ib[idx+1]; +							dst_offset = radeon_get_ib_value(p, idx+1);  							dst_offset <<= 8;  							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);  						} @@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  					switch (misc) {  					case 0:  						/* L2L, byte */ -						src_offset = ib[idx+2]; -						src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; -						dst_offset = ib[idx+1]; -						dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; +						src_offset = radeon_get_ib_value(p, idx+2); +						src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; +						dst_offset = radeon_get_ib_value(p, idx+1); +						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;  						if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {  							dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",  								 src_offset + count, radeon_bo_size(src_reloc->robj)); @@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");  							return -EINVAL;  						} -						dst_offset = ib[idx+1]; -						dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; -						dst2_offset = ib[idx+2]; -						dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; -						src_offset = ib[idx+3]; -						src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; +						dst_offset = radeon_get_ib_value(p, idx+1); +						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; +						dst2_offset = radeon_get_ib_value(p, idx+2); +						dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; +						src_offset = radeon_get_ib_value(p, idx+3); +						src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;  						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {  							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",  								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  					}  				} else {  					/* L2L, dw */ -					src_offset = ib[idx+2]; -					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; -					dst_offset = ib[idx+1]; -					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; +					src_offset = radeon_get_ib_value(p, idx+2); +					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; +					dst_offset = radeon_get_ib_value(p, idx+1); +					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;  					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {  						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",  							 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)  				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");  				return -EINVAL;  			} -			dst_offset = ib[idx+1]; -			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; +			dst_offset = 
radeon_get_ib_value(p, idx+1); +			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;  			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {  				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",  					 dst_offset, radeon_bo_size(dst_reloc->robj)); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 896f1cbc58a..835992d8d06 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev)  int cayman_dma_resume(struct radeon_device *rdev)  {  	struct radeon_ring *ring; -	u32 rb_cntl, dma_cntl; +	u32 rb_cntl, dma_cntl, ib_cntl;  	u32 rb_bufsz;  	u32 reg_offset, wb_offset;  	int i, r; @@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev)  		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);  		/* enable DMA IBs */ -		WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE); +		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; +#ifdef __BIG_ENDIAN +		ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif +		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);  		dma_cntl = RREG32(DMA_CNTL + reg_offset);  		dma_cntl &= ~CTXEMPTY_INT_ENABLE; @@ -1409,6 +1413,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)  {  	struct evergreen_mc_save save; +	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) +		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + +	if (RREG32(DMA_STATUS_REG) & DMA_IDLE) +		reset_mask &= ~RADEON_RESET_DMA; +  	if (reset_mask == 0)  		return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 537e259b383..becb03e8b32 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)  {  	struct rv515_mc_save save; +	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) +		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + +	if (RREG32(DMA_STATUS_REG) & DMA_IDLE) +		reset_mask &= ~RADEON_RESET_DMA; +  	if (reset_mask == 0)  		return 0; @@ -1456,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,  			      u32 disabled_rb_mask)  {  	u32 rendering_pipe_num, rb_num_width, req_rb_num; -	u32 pipe_rb_ratio, pipe_rb_remain; +	u32 pipe_rb_ratio, pipe_rb_remain, tmp;  	u32 data = 0, mask = 1 << (max_rb_num - 1);  	unsigned i, j;  	/* mask out the RBs that don't exist on that asic */ -	disabled_rb_mask |= (0xff << max_rb_num) & 0xff; +	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); +	/* make sure at least one RB is available */ +	if ((tmp & 0xff) != 0xff) +		disabled_rb_mask = tmp;  	rendering_pipe_num = 1 << tiling_pipe_num;  	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); @@ -2307,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev)  int r600_dma_resume(struct radeon_device *rdev)  {  	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; -	u32 rb_cntl, dma_cntl; +	u32 rb_cntl, dma_cntl, ib_cntl;  	u32 rb_bufsz;  	int r; @@ -2347,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev)  	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);  	/* enable DMA IBs */ -	WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); +	ib_cntl = DMA_IB_ENABLE; +#ifdef __BIG_ENDIAN +	ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif +	WREG32(DMA_IB_CNTL, ib_cntl);  	dma_cntl = RREG32(DMA_CNTL);  	dma_cntl &= ~CTXEMPTY_INT_ENABLE; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 69ec24ab8d6..9b2512bf1a4 
100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)  				return -EINVAL;  			}  			if (tiled) { -				dst_offset = ib[idx+1]; +				dst_offset = radeon_get_ib_value(p, idx+1);  				dst_offset <<= 8;  				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);  				p->idx += count + 5;  			} else { -				dst_offset = ib[idx+1]; -				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; +				dst_offset = radeon_get_ib_value(p, idx+1); +				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;  				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; @@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)  				/* detile bit */  				if (idx_value & (1 << 31)) {  					/* tiled src, linear dst */ -					src_offset = ib[idx+1]; +					src_offset = radeon_get_ib_value(p, idx+1);  					src_offset <<= 8;  					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); -					dst_offset = ib[idx+5]; -					dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; +					dst_offset = radeon_get_ib_value(p, idx+5); +					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;  					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;  				} else {  					/* linear src, tiled dst */ -					src_offset = ib[idx+5]; -					src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; +					src_offset = radeon_get_ib_value(p, idx+5); +					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;  					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);  					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; -					dst_offset = ib[idx+1]; +					dst_offset = radeon_get_ib_value(p, idx+1);  					dst_offset <<= 8;  					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);  				}  				p->idx += 7;  			} else {  				if (p->family >= CHIP_RV770) { -					src_offset = ib[idx+2]; -					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; -					dst_offset = ib[idx+1]; -					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; +					src_offset = radeon_get_ib_value(p, idx+2); +					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; +					dst_offset = radeon_get_ib_value(p, idx+1); +					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;  					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); @@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)  					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;  					p->idx += 5;  				} else { -					src_offset = ib[idx+2]; -					src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; -					dst_offset = ib[idx+1]; -					dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16; +					src_offset = radeon_get_ib_value(p, idx+2); +					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; +					dst_offset = radeon_get_ib_value(p, idx+1); +					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;  					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);  					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); @@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)  				DRM_ERROR("bad DMA_PACKET_WRITE\n");  				return -EINVAL;  			} -			dst_offset = ib[idx+1]; -			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; +			dst_offset = 
radeon_get_ib_value(p, idx+1); +			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;  			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {  				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",  					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 34e52304a52..a08f657329a 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -324,7 +324,6 @@ struct radeon_bo {  	struct list_head		list;  	/* Protected by tbo.reserved */  	u32				placements[3]; -	u32				busy_placements[3];  	struct ttm_placement		placement;  	struct ttm_buffer_object	tbo;  	struct ttm_bo_kmap_obj		kmap; @@ -654,6 +653,8 @@ struct radeon_ring {  	u32			ptr_reg_mask;  	u32			nop;  	u32			idx; +	u64			last_semaphore_signal_addr; +	u64			last_semaphore_wait_addr;  };  /* diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 9056fafb00e..0b202c07fe5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {  	.vm = {  		.init = &cayman_vm_init,  		.fini = &cayman_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &cayman_vm_set_page,  	},  	.ring = { @@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {  	.vm = {  		.init = &cayman_vm_init,  		.fini = &cayman_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &cayman_vm_set_page,  	},  	.ring = { @@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {  	.vm = {  		.init = &si_vm_init,  		.fini = &si_vm_fini, -		.pt_ring_index = R600_RING_TYPE_DMA_INDEX, +		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,  		.set_page = &si_vm_set_page,  	},  	.ring = { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 33a56a09ff1..3e403bdda58 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)  								   1),  								  ATOM_DEVICE_CRT1_SUPPORT);  				} +				/* RV100 board with external TDMS bit mis-set. +				 * Actually uses internal TMDS, clear the bit. 
+				 */ +				if (dev->pdev->device == 0x5159 && +				    dev->pdev->subsystem_vendor == 0x1014 && +				    dev->pdev->subsystem_device == 0x029A) { +					tmp &= ~(1 << 4); +				}  				if ((tmp >> 4) & 0x1) {  					devices |= ATOM_DEVICE_DFP2_SUPPORT;  					radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 469661fd190..5407459e56d 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -286,6 +286,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)  			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {  				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);  				kfree(p->chunks[p->chunk_ib_idx].kpage[1]); +				p->chunks[p->chunk_ib_idx].kpage[0] = NULL; +				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;  				return -ENOMEM;  			}  		} diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index ad6df625e8b..0d67674b64b 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,  		y = 0;  	} -	if (ASIC_IS_AVIVO(rdev)) { +	/* fixed on DCE6 and newer */ +	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {  		int i = 0;  		struct drm_crtc *crtc_p; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index edfc54e4184..0d6562bb0c9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)  {  	uint32_t reg; -	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) +	if (efi_enabled(EFI_BOOT) && +	    rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)  		return false;  	/* first check CRTCs */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1da2386d7cf..05c96fa0b05 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1115,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,  	}  	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); -	if (radeon_fb == NULL) +	if (radeon_fb == NULL) { +		drm_gem_object_unreference_unlocked(obj);  		return ERR_PTR(-ENOMEM); +	}  	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);  	if (ret) {  		kfree(radeon_fb);  		drm_gem_object_unreference_unlocked(obj); -		return NULL; +		return ERR_PTR(ret);  	}  	return &radeon_fb->base; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dff6cf77f95..d9bf96ee299 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -69,9 +69,10 @@   *   2.26.0 - r600-eg: fix htile size computation   *   2.27.0 - r600-SI: Add CS ioctl support for async DMA   *   2.28.0 - r600-eg: Add MEM_WRITE packet support + *   2.29.0 - R500 FP16 color clear registers   */  #define KMS_DRIVER_MAJOR	2 -#define KMS_DRIVER_MINOR	28 +#define KMS_DRIVER_MINOR	29  #define KMS_DRIVER_PATCHLEVEL	0  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);  int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 883c95d8d90..d3aface2d12 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)  	
rbo->placement.fpfn = 0;  	rbo->placement.lpfn = 0;  	rbo->placement.placement = rbo->placements; +	rbo->placement.busy_placement = rbo->placements;  	if (domain & RADEON_GEM_DOMAIN_VRAM)  		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |  					TTM_PL_FLAG_VRAM; @@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)  	if (!c)  		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;  	rbo->placement.num_placement = c; - -	c = 0; -	rbo->placement.busy_placement = rbo->busy_placements; -	if (rbo->rdev->flags & RADEON_IS_AGP) { -		rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; -	} else { -		rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; -	}  	rbo->placement.num_busy_placement = c;  } @@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)  {  	struct radeon_bo_list *lobj;  	struct radeon_bo *bo; +	u32 domain;  	int r;  	r = ttm_eu_reserve_buffers(head); @@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)  	list_for_each_entry(lobj, head, tv.head) {  		bo = lobj->bo;  		if (!bo->pin_count) { +			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; +			 +		retry: +			radeon_ttm_placement_from_domain(bo, domain);  			r = ttm_bo_validate(&bo->tbo, &bo->placement,  						true, false);  			if (unlikely(r)) { +				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { +					domain |= RADEON_GEM_DOMAIN_GTT; +					goto retry; +				}  				return r;  			}  		} diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 141f2b6a9cf..cd72062d5a9 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi  {  	int r; +	/* make sure we aren't trying to allocate more space than there is on the ring */ +	if (ndw > (ring->ring_size / 4)) +		return -ENOMEM;  	/* Align requested size with padding so unlock_commit can  	 * pad safely */  	ndw = (ndw + ring->align_mask) & ~ring->align_mask; @@ -784,6 +787,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)  	}  	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);  	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); +	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); +	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);  	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);  	seq_printf(m, "%u dwords in ring\n", count);  	/* print 8 dw before current rptr as often it's the last executed diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 97f3ece81cd..8dcc20f53d7 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,  	/* we assume caller has already allocated space on waiters ring */  	radeon_semaphore_emit_wait(rdev, waiter, semaphore); +	/* for debugging lockup only, used by sysfs debug files */ +	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; +	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; +  	return 0;  } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1d8ff2f850b..93f760e27a9 100644 --- 
a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -38,6 +38,7 @@  #include <drm/radeon_drm.h>  #include <linux/seq_file.h>  #include <linux/slab.h> +#include <linux/swiotlb.h>  #include "radeon_reg.h"  #include "radeon.h" diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0f656b111c1..a072fa8c46b 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -1,5 +1,6 @@  cayman 0x9400  0x0000802C GRBM_GFX_INDEX +0x00008040 WAIT_UNTIL  0x000084FC CP_STRMOUT_CNTL  0x000085F0 CP_COHER_CNTL  0x000085F4 CP_COHER_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 911a8fbd32b..78d5e99d759 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -324,6 +324,8 @@ rv515 0x6d40  0x46AC US_OUT_FMT_2  0x46B0 US_OUT_FMT_3  0x46B4 US_W_FMT +0x46C0 RB3D_COLOR_CLEAR_VALUE_AR +0x46C4 RB3D_COLOR_CLEAR_VALUE_GB  0x4BC0 FG_FOG_BLEND  0x4BC4 FG_FOG_FACTOR  0x4BC8 FG_FOG_COLOR_R diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 2bb6d0e84b3..435ed355136 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)  				WREG32(R600_CITF_CNTL, blackout);  		}  	} +	/* wait for the MC to settle */ +	udelay(100);  }  void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3240a3d64f3..ae8b48205a6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)  {  	struct evergreen_mc_save save; +	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) +		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + +	if (RREG32(DMA_STATUS_REG) & DMA_IDLE) +		reset_mask &= ~RADEON_RESET_DMA; +  	if (reset_mask == 0)  		return 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 33d20be87db..52b20b12c83 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,  			bo->mem = tmp_mem;  			bdev->driver->move_notify(bo, mem);  			bo->mem = *mem; +			*mem = tmp_mem;  		}  		goto out_err; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d73d6e3e17b..8be35c809c7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,  	if (ttm->state == tt_unpopulated) {  		ret = ttm->bdev->driver->ttm_tt_populate(ttm); -		if (ret) +		if (ret) { +			/* if we fail here don't nuke the mm node +			 * as the bo still owns it */ +			old_copy.mm_node = NULL;  			goto out1; +		}  	}  	add = 0; @@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,  						   prot);  		} else  			ret = ttm_copy_io_page(new_iomap, old_iomap, page); -		if (ret) +		if (ret) { +			/* failing here, means keep old copy as-is */ +			old_copy.mm_node = NULL;  			goto out1; +		}  	}  	mb();  out2: @@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,  	struct ttm_bo_device *bdev = bo->bdev;  	struct ttm_bo_driver *driver = bdev->driver; -	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); +	fbo = kmalloc(sizeof(*fbo), 
GFP_KERNEL);  	if (!fbo)  		return -ENOMEM; @@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,  	fbo->vm_node = NULL;  	atomic_set(&fbo->cpu_writers, 0); -	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); +	spin_lock(&bdev->fence_lock); +	if (bo->sync_obj) +		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); +	else +		fbo->sync_obj = NULL; +	spin_unlock(&bdev->fence_lock);  	kref_init(&fbo->list_kref);  	kref_init(&fbo->kref);  	fbo->destroy = &ttm_transfered_destroy; @@ -654,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,  		 */  		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - -		/* ttm_buffer_object_transfer accesses bo->sync_obj */ -		ret = ttm_buffer_object_transfer(bo, &ghost_obj);  		spin_unlock(&bdev->fence_lock);  		if (tmp_obj)  			driver->sync_obj_unref(&tmp_obj); +		ret = ttm_buffer_object_transfer(bo, &ghost_obj);  		if (ret)  			return ret; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 4dfa605e2d1..34e25471aea 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -306,6 +306,9 @@  #define USB_VENDOR_ID_EZKEY		0x0518  #define USB_DEVICE_ID_BTC_8193		0x0002 +#define USB_VENDOR_ID_FORMOSA          0x147a +#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER      0xe03e +  #define USB_VENDOR_ID_FREESCALE		0x15A2  #define USB_DEVICE_ID_FREESCALE_MX28	0x004F diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 12e4fdc810b..e766b5614ef 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,  {  	struct i2c_client *client = hid->driver_data;  	int report_id = buf[0]; +	int ret;  	if (report_type == HID_INPUT_REPORT)  		return -EINVAL; -	return i2c_hid_set_report(client, +	if (report_id) { +		buf++; +		count--; +	} + +	ret = i2c_hid_set_report(client,  				report_type == HID_FEATURE_REPORT ? 
0x03 : 0x02,  				report_id, buf, count); + +	if (report_id && ret >= 0) +		ret++; /* add report_id to the number of transfered bytes */ + +	return ret;  }  static int i2c_hid_parse(struct hid_device *hid) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index ac9e3522825..e0e6abf1cd3 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist {  	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },  	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },  	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, +	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },  	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },  	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },  	{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index f6c0011a033..dd289fd179c 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -403,7 +403,7 @@ struct dm_info_header {   */  struct dm_info_msg { -	struct dm_info_header header; +	struct dm_header hdr;  	__u32 reserved;  	__u32 info_size;  	__u8  info[]; @@ -503,13 +503,17 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)  static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)  { -	switch (msg->header.type) { +	struct dm_info_header *info_hdr; + +	info_hdr = (struct dm_info_header *)msg->info; + +	switch (info_hdr->type) {  	case INFO_TYPE_MAX_PAGE_CNT:  		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); -		pr_info("Data Size is %d\n", msg->header.data_size); +		pr_info("Data Size is %d\n", info_hdr->data_size);  		break;  	default: -		pr_info("Received Unknown type: %d\n", msg->header.type); +		pr_info("Received Unknown type: %d\n", info_hdr->type);  	}  } @@ -879,7 +883,7 @@ static int balloon_probe(struct hv_device *dev,  			balloon_onchannelcallback, dev);  	if (ret) -		return ret; +		goto probe_error0;  	dm_device.dev = dev;  	dm_device.state = DM_INITIALIZING; @@ -891,7 +895,7 @@ static int balloon_probe(struct hv_device *dev,  		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");  	if (IS_ERR(dm_device.thread)) {  		ret = PTR_ERR(dm_device.thread); -		goto probe_error0; +		goto probe_error1;  	}  	hv_set_drvdata(dev, &dm_device); @@ -914,12 +918,12 @@ static int balloon_probe(struct hv_device *dev,  				VM_PKT_DATA_INBAND,  				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);  	if (ret) -		goto probe_error1; +		goto probe_error2;  	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);  	if (t == 0) {  		ret = -ETIMEDOUT; -		goto probe_error1; +		goto probe_error2;  	}  	/* @@ -928,7 +932,7 @@ static int balloon_probe(struct hv_device *dev,  	 */  	if (dm_device.state == DM_INIT_ERROR) {  		ret = -ETIMEDOUT; -		goto probe_error1; +		goto probe_error2;  	}  	/*  	 * Now submit our capabilities to the host. 
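The balloon_probe hunk above reshuffles the error labels so that each failure point jumps to a label that releases only what was acquired before it, with later labels falling through to earlier cleanups; the new kfree(send_buffer) at the earliest label is what pushed the later labels up by one. A minimal, self-contained C sketch of that staged goto-unwind idiom is below; the resource names and helpers (buf, open_channel, start_thread, ...) are illustrative stand-ins, not code from the driver.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the resources a probe routine acquires. */
static void *open_channel(void)    { return malloc(1); }
static void close_channel(void *c) { free(c); }
static void *start_thread(void)    { return malloc(1); }
static void stop_thread(void *t)   { free(t); }

static int probe(void)
{
	void *buf, *chan, *thr;

	buf = malloc(4096);		/* earliest allocation */
	if (!buf)
		return -1;

	chan = open_channel();
	if (!chan)
		goto err_free_buf;	/* undo only what already succeeded */

	thr = start_thread();
	if (!thr)
		goto err_close_chan;

	stop_thread(thr);		/* success path (sketch only) */
	close_channel(chan);
	free(buf);
	return 0;

err_close_chan:
	close_channel(chan);		/* falls through to earlier cleanups */
err_free_buf:
	free(buf);
	return -1;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The ordering is the whole point of the idiom: a failure after N successful steps must undo exactly those N steps in reverse, which is why inserting one extra early allocation forces every later label to be renumbered, as in the hunk above.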
@@ -961,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,  				VM_PKT_DATA_INBAND,  				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);  	if (ret) -		goto probe_error1; +		goto probe_error2;  	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);  	if (t == 0) {  		ret = -ETIMEDOUT; -		goto probe_error1; +		goto probe_error2;  	}  	/* @@ -975,18 +979,20 @@ static int balloon_probe(struct hv_device *dev,  	 */  	if (dm_device.state == DM_INIT_ERROR) {  		ret = -ETIMEDOUT; -		goto probe_error1; +		goto probe_error2;  	}  	dm_device.state = DM_INITIALIZED;  	return 0; -probe_error1: +probe_error2:  	kthread_stop(dm_device.thread); -probe_error0: +probe_error1:  	vmbus_close(dev->channel); +probe_error0: +	kfree(send_buffer);  	return ret;  } @@ -999,6 +1005,7 @@ static int balloon_remove(struct hv_device *dev)  	vmbus_close(dev->channel);  	kthread_stop(dm->thread); +	kfree(send_buffer);  	return 0;  } diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index cbba7db9ad5..f5258c205de 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -34,6 +34,7 @@  #include <linux/io.h>  #include <linux/pm_runtime.h>  #include <linux/delay.h> +#include <linux/module.h>  #include "i2c-designware-core.h"  /* @@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)  	return dw_readl(dev, DW_IC_COMP_PARAM_1);  }  EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); + +MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 1b1a936eccc..d6abaf2cf2e 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -127,7 +127,7 @@ struct mxs_i2c_dev {  	struct device *dev;  	void __iomem *regs;  	struct completion cmd_complete; -	u32 cmd_err; +	int cmd_err;  	struct i2c_adapter adapter;  	const struct mxs_i2c_speed_config *speed; @@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,  	if (msg->len == 0)  		return -EINVAL; -	init_completion(&i2c->cmd_complete); +	INIT_COMPLETION(i2c->cmd_complete);  	i2c->cmd_err = 0;  	ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); @@ -473,6 +473,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)  	i2c->dev = dev;  	i2c->speed = &mxs_i2c_95kHz_config; +	init_completion(&i2c->cmd_complete); +  	if (dev->of_node) {  		err = mxs_i2c_get_ofdata(i2c);  		if (err) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 20d41bfa7c1..4cc2f0528c8 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -803,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)  			if (stat & OMAP_I2C_STAT_AL) {  				dev_err(dev->dev, "Arbitration lost\n");  				dev->cmd_err |= OMAP_I2C_STAT_AL; -				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); +				omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);  			}  			return -EIO; @@ -963,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)  				i2c_omap_errata_i207(dev, stat);  			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); -			break; +			continue;  		}  		if (stat & OMAP_I2C_STAT_RRDY) { @@ -989,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)  				break;  			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR); -			break; +			continue;  		}  		if (stat & OMAP_I2C_STAT_XRDY) { diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 3f1818b8797..e03381aee34 100644 --- 
a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -12,6 +12,7 @@  #include <linux/slab.h>  #include <linux/platform_device.h>  #include <linux/i2c.h> +#include <linux/of_i2c.h>  #include <linux/clk.h>  #include <linux/err.h>  #include <linux/io.h> @@ -328,6 +329,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)  	adap->algo = &i2c_sirfsoc_algo;  	adap->algo_data = siic; +	adap->dev.of_node = pdev->dev.of_node;  	adap->dev.parent = &pdev->dev;  	adap->nr = pdev->id; @@ -371,6 +373,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)  	clk_disable(clk); +	of_i2c_register_devices(adap); +  	dev_info(&pdev->dev, " I2C adapter ready to operate\n");  	return 0; diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 1e44d04d1b2..a43c0ce5e3d 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -167,7 +167,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)  	}  	mux->busses = devm_kzalloc(&pdev->dev, -				   sizeof(mux->busses) * mux->pdata->bus_count, +				   sizeof(*mux->busses) * mux->pdata->bus_count,  				   GFP_KERNEL);  	if (!mux->busses) {  		dev_err(&pdev->dev, "Cannot allocate busses\n"); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 4ba384f1ab5..2df9414a72f 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -448,8 +448,6 @@ static int intel_idle_probe(void)  	else  		on_each_cpu(__setup_broadcast_timer, (void *)true, 1); -	register_cpu_notifier(&cpu_hotplug_notifier); -  	pr_debug(PREFIX "v" INTEL_IDLE_VERSION  		" model 0x%X\n", boot_cpu_data.x86_model); @@ -612,6 +610,7 @@ static int __init intel_idle_init(void)  			return retval;  		}  	} +	register_cpu_notifier(&cpu_hotplug_notifier);  	return 0;  } diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4850d03870c..35275099caf 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)  		struct qib_qp __rcu **qpp;  		qpp = &dev->qp_table[n]; -		q = rcu_dereference_protected(*qpp, -			lockdep_is_held(&dev->qpt_lock)); -		for (; q; qpp = &q->next) { +		for (; (q = rcu_dereference_protected(*qpp, +				lockdep_is_held(&dev->qpt_lock))) != NULL; +				qpp = &q->next)  			if (q == qp) {  				atomic_dec(&qp->refcount);  				*qpp = qp->next;  				rcu_assign_pointer(qp->next, NULL); -				q = rcu_dereference_protected(*qpp, -					lockdep_is_held(&dev->qpt_lock));  				break;  			} -			q = rcu_dereference_protected(*qpp, -				lockdep_is_held(&dev->qpt_lock)); -		}  	}  	spin_unlock_irqrestore(&dev->qpt_lock, flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 03103d2bd64..67b0c1d2367 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_  	tx_req->mapping = addr; +	skb_orphan(skb); +	skb_dst_drop(skb); +  	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),  		       addr, skb->len);  	if (unlikely(rc)) { @@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_  		dev->trans_start = jiffies;  		++tx->tx_head; -		skb_orphan(skb); -		skb_dst_drop(skb); -  		if (++priv->tx_outstanding == ipoib_sendq_size) {  			ipoib_dbg(priv, "TX ring 0x%x full, stopping 
kernel net queue\n",  				  tx->qp->qp_num); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1bca70e20a..2cfa76f5d99 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,  		netif_stop_queue(dev);  	} +	skb_orphan(skb); +	skb_dst_drop(skb); +  	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),  		       address->ah, qpn, tx_req, phead, hlen);  	if (unlikely(rc)) { @@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,  		address->last_send = priv->tx_head;  		++priv->tx_head; - -		skb_orphan(skb); -		skb_dst_drop(skb);  	}  	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) diff --git a/drivers/input/input.c b/drivers/input/input.c index ce01332f7b3..c0446992892 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1785,12 +1785,13 @@ static void devm_input_device_release(struct device *dev, void *res)   * its driver (or binding fails). Once managed input device is allocated,   * it is ready to be set up and registered in the same fashion as regular   * input device. There are no special devm_input_device_[un]register() - * variants, regular ones work with both managed and unmanaged devices. + * variants, regular ones work with both managed and unmanaged devices, + * should you need them. In most cases however, managed input device need + * not be explicitly unregistered or freed.   *   * NOTE: the owner device is set up as parent of input device and users   * should not override it.   */ -  struct input_dev *devm_input_allocate_device(struct device *dev)  {  	struct input_dev *input; @@ -2004,6 +2005,17 @@ static void devm_input_device_unregister(struct device *dev, void *res)   * Once device has been successfully registered it can be unregistered   * with input_unregister_device(); input_free_device() should not be   * called in this case. + * + * Note that this function is also used to register managed input devices + * (ones allocated with devm_input_allocate_device()). Such managed input + * devices need not be explicitly unregistered or freed, their tear down + * is controlled by the devres infrastructure. It is also worth noting + * that tear down of managed input devices is internally a 2-step process: + * registered managed input device is first unregistered, but stays in + * memory and can still handle input_event() calls (although events will + * not be delivered anywhere). The freeing of managed input device will + * happen later, when devres stack is unwound to the point where device + * allocation was made.   
*/  int input_register_device(struct input_dev *dev)  { diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 358cd7ee905..7cd74e29cbc 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)  #define GET_TIME(x)	do { x = get_cycles(); } while (0)  #define DELTA(x,y)	((y)-(x))  #define TIME_NAME	"PCC" -#elif defined(CONFIG_MN10300) +#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)  #define GET_TIME(x)	do { x = get_cycles(); } while (0)  #define DELTA(x, y)	((x) - (y))  #define TIME_NAME	"TSC" diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index 93c81266213..0de23f41b2d 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -398,7 +398,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm)  			lm8323_configure(lm);  		}  		for (i = 0; i < LM8323_NUM_PWMS; i++) { -			if (ints & (1 << (INT_PWM1 + i))) { +			if (ints & (INT_PWM1 << i)) {  				dev_vdbg(&lm->client->dev,  					 "pwm%d engine completed\n", i);  				pwm_done(&lm->pwm[i]); diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index f92d34f45a1..aaf23aeae2e 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -553,10 +553,10 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int  	if (!rep_data)  		return error; -	rep_data[0] = report_id; -	rep_data[1] = mode; -  	do { +		rep_data[0] = report_id; +		rep_data[1] = mode; +  		error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,  		                         report_id, rep_data, length, 1);  		if (error >= 0) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 81837b0710a..faf10ba1ed9 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -975,6 +975,38 @@ static void __init free_iommu_all(void)  }  /* + * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) + * Workaround: + *     BIOS should disable L2B micellaneous clock gating by setting + *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b + */ +static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) +{ +	u32 value; + +	if ((boot_cpu_data.x86 != 0x15) || +	    (boot_cpu_data.x86_model < 0x10) || +	    (boot_cpu_data.x86_model > 0x1f)) +		return; + +	pci_write_config_dword(iommu->dev, 0xf0, 0x90); +	pci_read_config_dword(iommu->dev, 0xf4, &value); + +	if (value & BIT(2)) +		return; + +	/* Select NB indirect register 0x90 and enable writing */ +	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); + +	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); +	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", +		dev_name(&iommu->dev->dev)); + +	/* Clear the enable writing bit */ +	pci_write_config_dword(iommu->dev, 0xf0, 0x90); +} + +/*   * This function clues the initialization function for one IOMMU   * together and also allocates the command buffer and programs the   * hardware. It does NOT enable the IOMMU. This is done afterwards. 
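The erratum 746 workaround above is an instance of the index/data indirect-register idiom: write the target register index (here with a write-enable bit) to one config offset, access the register's data through a second offset, then clear the enable bit again. The fragment below is a small stand-alone C sketch of that read-modify-write sequence against a simulated register file; the offsets, bit positions and helper names only mirror the idiom for illustration and are not the driver's real PCI accessors.

#include <stdint.h>
#include <stdio.h>

#define IDX_PORT   0xF0			/* selects an indirect register */
#define DATA_PORT  0xF4			/* reads/writes the selected register */
#define WR_ENABLE  (1u << 8)		/* write-enable bit in the index port */

static uint32_t cfg[0x100];		/* fake config space, indexed by offset/4 */
static uint32_t indirect[0x100];	/* fake indirect register file */

static void cfg_write(uint32_t off, uint32_t val)
{
	cfg[off / 4] = val;
	if (off == DATA_PORT && (cfg[IDX_PORT / 4] & WR_ENABLE))
		indirect[cfg[IDX_PORT / 4] & 0xff] = val;	/* committed only when enabled */
}

static uint32_t cfg_read(uint32_t off)
{
	if (off == DATA_PORT)
		return indirect[cfg[IDX_PORT / 4] & 0xff];
	return cfg[off / 4];
}

int main(void)
{
	uint32_t val;

	/* Select indirect register 0x90 and read its current value. */
	cfg_write(IDX_PORT, 0x90);
	val = cfg_read(DATA_PORT);

	if (!(val & (1u << 2))) {
		/* Re-select with write enable, set bit 2, then drop the enable bit. */
		cfg_write(IDX_PORT, 0x90 | WR_ENABLE);
		cfg_write(DATA_PORT, val | (1u << 2));
		cfg_write(IDX_PORT, 0x90);
	}

	printf("indirect[0x90] = 0x%x\n", cfg_read(DATA_PORT));
	return 0;
}

Run as-is this prints indirect[0x90] = 0x4, i.e. the "set the bit only if it is not already set" behaviour the workaround implements before logging that it was applied.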
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)  			iommu->stored_l2[i] = iommu_read_l2(iommu, i);  	} +	amd_iommu_erratum_746_workaround(iommu); +  	return pci_enable_device(iommu->dev);  } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b9d09115788..eca28014ef3 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4234,6 +4234,21 @@ static struct iommu_ops intel_iommu_ops = {  	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,  }; +static void quirk_iommu_g4x_gfx(struct pci_dev *dev) +{ +	/* G4x/GM45 integrated gfx dmar support is totally busted. */ +	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); +	dmar_map_gfx = 0; +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx); +  static void quirk_iommu_rwbf(struct pci_dev *dev)  {  	/* @@ -4242,12 +4257,6 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)  	 */  	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");  	rwbf_quirk = 1; - -	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ -	if (dev->revision == 0x07) { -		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); -		dmar_map_gfx = 0; -	}  }  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 68452b768da..03a0a01a405 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,  		CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,  		CAPIMSG_CONTROL(data));  	l -= 12; +	if (l <= 0) +		return;  	dbgline = kmalloc(3 * l, GFP_ATOMIC);  	if (!dbgline)  		return; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 3d8984edeff..9e58dbd8d8c 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -340,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)  }  /* - * validate_rebuild_devices + * validate_raid_redundancy   * @rs   * - * Determine if the devices specified for rebuild can result in a valid - * usable array that is capable of rebuilding the given devices. + * Determine if there are enough devices in the array that haven't + * failed (or are being rebuilt) to form a usable array.   *   * Returns: 0 on success, -EINVAL on failure.   
*/ -static int validate_rebuild_devices(struct raid_set *rs) +static int validate_raid_redundancy(struct raid_set *rs)  {  	unsigned i, rebuild_cnt = 0;  	unsigned rebuilds_per_group, copies, d; -	if (!(rs->print_flags & DMPF_REBUILD)) -		return 0; -  	for (i = 0; i < rs->md.raid_disks; i++) -		if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) +		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || +		    !rs->dev[i].rdev.sb_page)  			rebuild_cnt++;  	switch (rs->raid_type->level) { @@ -393,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)  		 *          A    A    B    B    C  		 *          C    D    D    E    E  		 */ -		rebuilds_per_group = 0;  		for (i = 0; i < rs->md.raid_disks * copies; i++) { +			if (!(i % copies)) +				rebuilds_per_group = 0;  			d = i % rs->md.raid_disks; -			if (!test_bit(In_sync, &rs->dev[d].rdev.flags) && +			if ((!rs->dev[d].rdev.sb_page || +			     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&  			    (++rebuilds_per_group >= copies))  				goto too_many; -			if (!((i + 1) % copies)) -				rebuilds_per_group = 0;  		}  		break;  	default: -		DMERR("The rebuild parameter is not supported for %s", -		      rs->raid_type->name); -		rs->ti->error = "Rebuild not supported for this RAID type"; -		return -EINVAL; +		if (rebuild_cnt) +			return -EINVAL;  	}  	return 0;  too_many: -	rs->ti->error = "Too many rebuild devices specified";  	return -EINVAL;  } @@ -664,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,  	}  	rs->md.dev_sectors = sectors_per_dev; -	if (validate_rebuild_devices(rs)) -		return -EINVAL; -  	/* Assume there are no metadata devices until the drives are parsed */  	rs->md.persistent = 0;  	rs->md.external = 1; @@ -995,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)  static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)  {  	int ret; -	unsigned redundancy = 0;  	struct raid_dev *dev;  	struct md_rdev *rdev, *tmp, *freshest;  	struct mddev *mddev = &rs->md; -	switch (rs->raid_type->level) { -	case 1: -		redundancy = rs->md.raid_disks - 1; -		break; -	case 4: -	case 5: -	case 6: -		redundancy = rs->raid_type->parity_devs; -		break; -	case 10: -		redundancy = raid10_md_layout_to_copies(mddev->layout) - 1; -		break; -	default: -		ti->error = "Unknown RAID type"; -		return -EINVAL; -	} -  	freshest = NULL;  	rdev_for_each_safe(rdev, tmp, mddev) {  		/* @@ -1045,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)  			break;  		default:  			dev = container_of(rdev, struct raid_dev, rdev); -			if (redundancy--) { -				if (dev->meta_dev) -					dm_put_device(ti, dev->meta_dev); - -				dev->meta_dev = NULL; -				rdev->meta_bdev = NULL; +			if (dev->meta_dev) +				dm_put_device(ti, dev->meta_dev); -				if (rdev->sb_page) -					put_page(rdev->sb_page); +			dev->meta_dev = NULL; +			rdev->meta_bdev = NULL; -				rdev->sb_page = NULL; +			if (rdev->sb_page) +				put_page(rdev->sb_page); -				rdev->sb_loaded = 0; +			rdev->sb_page = NULL; -				/* -				 * We might be able to salvage the data device -				 * even though the meta device has failed.  For -				 * now, we behave as though '- -' had been -				 * set for this device in the table. -				 */ -				if (dev->data_dev) -					dm_put_device(ti, dev->data_dev); +			rdev->sb_loaded = 0; -				dev->data_dev = NULL; -				rdev->bdev = NULL; +			/* +			 * We might be able to salvage the data device +			 * even though the meta device has failed.  
For +			 * now, we behave as though '- -' had been +			 * set for this device in the table. +			 */ +			if (dev->data_dev) +				dm_put_device(ti, dev->data_dev); -				list_del(&rdev->same_set); +			dev->data_dev = NULL; +			rdev->bdev = NULL; -				continue; -			} -			ti->error = "Failed to load superblock"; -			return ret; +			list_del(&rdev->same_set);  		}  	}  	if (!freshest)  		return 0; +	if (validate_raid_redundancy(rs)) { +		rs->ti->error = "Insufficient redundancy to activate array"; +		return -EINVAL; +	} +  	/*  	 * Validation of the freshest device provides the source of  	 * validation for the remaining devices. @@ -1432,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)  static struct target_type raid_target = {  	.name = "raid", -	.version = {1, 4, 0}, +	.version = {1, 4, 1},  	.module = THIS_MODULE,  	.ctr = raid_ctr,  	.dtr = raid_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 675ae527401..5409607d487 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,  	return 0;  } -/* - * A thin device always inherits its queue limits from its pool. - */ -static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) -{ -	struct thin_c *tc = ti->private; - -	*limits = bdev_get_queue(tc->pool_dev->bdev)->limits; -} -  static struct target_type thin_target = {  	.name = "thin", -	.version = {1, 6, 0}, +	.version = {1, 7, 0},  	.module	= THIS_MODULE,  	.ctr = thin_ctr,  	.dtr = thin_dtr, @@ -2767,7 +2757,6 @@ static struct target_type thin_target = {  	.postsuspend = thin_postsuspend,  	.status = thin_status,  	.iterate_devices = thin_iterate_devices, -	.io_hints = thin_io_hints,  };  /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c72e4d5a961..314a0e2faf7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,  {  	struct dm_target *ti;  	sector_t len; +	unsigned num_requests;  	do {  		ti = dm_table_find_target(ci->map, ci->sector); @@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,  		 * reconfiguration might also have changed that since the  		 * check was performed.  		 */ -		if (!get_num_requests || !get_num_requests(ti)) +		num_requests = get_num_requests ? 
get_num_requests(ti) : 0; +		if (!num_requests)  			return -EOPNOTSUPP;  		if (is_split_required && !is_split_required(ti)) @@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,  		else  			len = min(ci->sector_count, max_io_len(ci->sector, ti)); -		__issue_target_requests(ci, ti, ti->num_discard_requests, len); +		__issue_target_requests(ci, ti, num_requests, len);  		ci->sector += len;  	} while (ci->sector_count -= len); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 49d95040096..0223ad255cb 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -1820,7 +1820,7 @@ static int dvb_frontend_ioctl(struct file *file,  	struct dvb_frontend *fe = dvbdev->priv;  	struct dtv_frontend_properties *c = &fe->dtv_property_cache;  	struct dvb_frontend_private *fepriv = fe->frontend_priv; -	int err = -ENOTTY; +	int err = -EOPNOTSUPP;  	dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));  	if (fepriv->exit != DVB_FE_NO_EXIT) @@ -1938,7 +1938,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,  		}  	} else -		err = -ENOTTY; +		err = -EOPNOTSUPP;  out:  	kfree(tvp); @@ -2071,7 +2071,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,  	struct dvb_frontend *fe = dvbdev->priv;  	struct dvb_frontend_private *fepriv = fe->frontend_priv;  	struct dtv_frontend_properties *c = &fe->dtv_property_cache; -	int err = -ENOTTY; +	int err = -EOPNOTSUPP;  	switch (cmd) {  	case FE_GET_INFO: { diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 8a8d42fe263..d4e7567b367 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,  	mutex_lock(&info->lock);  	format = __find_format(info, fh, fmt->which, info->res_type); -	if (!format) +	if (format)  		fmt->format = *format;  	else  		ret = -EINVAL; diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 1cf8293c0fb..4a980e029ca 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -23,8 +23,8 @@  #include <linux/slab.h>  #include <linux/videodev2.h>  #include <linux/of.h> +#include <linux/platform_data/imx-iram.h> -#include <mach/iram.h>  #include <media/v4l2-ctrls.h>  #include <media/v4l2-device.h>  #include <media/v4l2-ioctl.h> diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index e0d73a64218..8dac17511e6 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -35,9 +35,6 @@  #include <linux/vmalloc.h>  #include <media/v4l2-dev.h>  #include <media/v4l2-ioctl.h> -#include <plat/iommu.h> -#include <plat/iovmm.h> -#include <plat/omap-pm.h>  #include "ispvideo.h"  #include "isp.h" diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c index 4ab99f3a7b0..b4a68ecf0ca 100644 --- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c @@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)  {  	struct media_entity *source, *sink;  	unsigned int flags = MEDIA_LNK_FL_ENABLED; -	int i, ret; +	int i, ret = 0;  	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {  		struct fimc_lite *fimc = fmd->fimc_lite[i]; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c 
b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 379f5743371..681bc6ba149 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -412,62 +412,48 @@ leave_handle_frame:  }  /* Error handling for interrupt */ -static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx, -				 unsigned int reason, unsigned int err) +static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, +		struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)  { -	struct s5p_mfc_dev *dev;  	unsigned long flags; -	/* If no context is available then all necessary -	 * processing has been done. */ -	if (ctx == NULL) -		return; - -	dev = ctx->dev;  	mfc_err("Interrupt Error: %08x\n", err); -	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); -	wake_up_dev(dev, reason, err); -	/* Error recovery is dependent on the state of context */ -	switch (ctx->state) { -	case MFCINST_INIT: -		/* This error had to happen while acquireing instance */ -	case MFCINST_GOT_INST: -		/* This error had to happen while parsing the header */ -	case MFCINST_HEAD_PARSED: -		/* This error had to happen while setting dst buffers */ -	case MFCINST_RETURN_INST: -		/* This error had to happen while releasing instance */ -		clear_work_bit(ctx); -		wake_up_ctx(ctx, reason, err); -		if (test_and_clear_bit(0, &dev->hw_lock) == 0) -			BUG(); -		s5p_mfc_clock_off(); -		ctx->state = MFCINST_ERROR; -		break; -	case MFCINST_FINISHING: -	case MFCINST_FINISHED: -	case MFCINST_RUNNING: -		/* It is higly probable that an error occured -		 * while decoding a frame */ -		clear_work_bit(ctx); -		ctx->state = MFCINST_ERROR; -		/* Mark all dst buffers as having an error */ -		spin_lock_irqsave(&dev->irqlock, flags); -		s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue, -				&ctx->vq_dst); -		/* Mark all src buffers as having an error */ -		s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue, -				&ctx->vq_src); -		spin_unlock_irqrestore(&dev->irqlock, flags); -		if (test_and_clear_bit(0, &dev->hw_lock) == 0) -			BUG(); -		s5p_mfc_clock_off(); -		break; -	default: -		mfc_err("Encountered an error interrupt which had not been handled\n"); -		break; +	if (ctx != NULL) { +		/* Error recovery is dependent on the state of context */ +		switch (ctx->state) { +		case MFCINST_RES_CHANGE_INIT: +		case MFCINST_RES_CHANGE_FLUSH: +		case MFCINST_RES_CHANGE_END: +		case MFCINST_FINISHING: +		case MFCINST_FINISHED: +		case MFCINST_RUNNING: +			/* It is higly probable that an error occured +			 * while decoding a frame */ +			clear_work_bit(ctx); +			ctx->state = MFCINST_ERROR; +			/* Mark all dst buffers as having an error */ +			spin_lock_irqsave(&dev->irqlock, flags); +			s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, +						&ctx->dst_queue, &ctx->vq_dst); +			/* Mark all src buffers as having an error */ +			s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, +						&ctx->src_queue, &ctx->vq_src); +			spin_unlock_irqrestore(&dev->irqlock, flags); +			wake_up_ctx(ctx, reason, err); +			break; +		default: +			clear_work_bit(ctx); +			ctx->state = MFCINST_ERROR; +			wake_up_ctx(ctx, reason, err); +			break; +		}  	} +	if (test_and_clear_bit(0, &dev->hw_lock) == 0) +		BUG(); +	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); +	s5p_mfc_clock_off(); +	wake_up_dev(dev, reason, err);  	return;  } @@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)  				dev->warn_start)  			s5p_mfc_handle_frame(ctx, reason, err);  		else -			s5p_mfc_handle_error(ctx, reason, err); +			s5p_mfc_handle_error(dev, ctx, 
reason, err);  		clear_bit(0, &dev->enter_suspend);  		break; diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index e10e525f33e..296941a9ae2 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,  	radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;  	radio->vdev.lock = &radio->lock;  	radio->vdev.release = video_device_release_empty; +	radio->vdev.vfl_dir = VFL_DIR_TX;  	radio->usbdev = interface_to_usbdev(intf);  	radio->intf = intf; diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index a082e400ed0..1507c9d508d 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c @@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {  	.name			= "radio-si4713",  	.release		= video_device_release,  	.ioctl_ops		= &radio_si4713_ioctl_ops, +	.vfl_dir		= VFL_DIR_TX,  };  /* Platform driver interface */ diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index c48be195bba..cabbe3adf43 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {  	.ioctl_ops		= &wl1273_ioctl_ops,  	.name			= WL1273_FM_DRIVER_NAME,  	.release		= wl1273_vdev_release, +	.vfl_dir		= VFL_DIR_TX,  };  static int wl1273_fm_radio_remove(struct platform_device *pdev) diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 048de453603..0a8ee8fab92 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {  	.ioctl_ops = &fm_drv_ioctl_ops,  	.name = FM_DRV_NAME,  	.release = video_device_release, +	/* +	 * To ensure both the tuner and modulator ioctls are accessible we +	 * set the vfl_dir to M2M to indicate this. +	 * +	 * It is not really a mem2mem device of course, but it can both receive +	 * and transmit using the same radio device. It's the only radio driver +	 * that does this and it should really be split in two radio devices, +	 * but that would affect applications using this driver. 
+	 */ +	.vfl_dir = VFL_DIR_M2M,  };  int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c index 40ad6687ee5..3773a8a745d 100644 --- a/drivers/media/usb/gspca/kinect.c +++ b/drivers/media/usb/gspca/kinect.c @@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {  /* -- module initialisation -- */  static const struct usb_device_id device_table[] = {  	{USB_DEVICE(0x045e, 0x02ae)}, +	{USB_DEVICE(0x045e, 0x02bf)},  	{}  }; diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 70511d5f953..1220340e760 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,  	}  } -static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) +static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)  {  	int retry = 60; @@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)  		return;  	/* is i2c ready */ -	reg_w(gspca_dev, 0x08, buffer, 8); +	reg_w(gspca_dev, 0x08, buf, 8);  	while (retry--) {  		if (gspca_dev->usb_err < 0)  			return; -		msleep(10); +		msleep(1);  		reg_r(gspca_dev, 0x08);  		if (gspca_dev->usb_buf[0] & 0x04) {  			if (gspca_dev->usb_buf[0] & 0x08) {  				dev_err(gspca_dev->v4l2_dev.dev, -					"i2c write error\n"); +					"i2c error writing %02x %02x %02x %02x" +					" %02x %02x %02x %02x\n", +					buf[0], buf[1], buf[2], buf[3], +					buf[4], buf[5], buf[6], buf[7]);  				gspca_dev->usb_err = -EIO;  			}  			return; @@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,  	for (;;) {  		if (gspca_dev->usb_err < 0)  			return; -		reg_w(gspca_dev, 0x08, *buffer, 8); +		i2c_w(gspca_dev, *buffer);  		len -= 8;  		if (len <= 0)  			break; diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index 5a86047b846..36307a9028a 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)  			0,  			gspca_dev->usb_buf, 8,  			500); +	msleep(2);  	if (ret < 0) {  		pr_err("i2c_w1 err %d\n", ret);  		gspca_dev->usb_err = ret; diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 2bb7613ddeb..d5baab17a5e 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,  	int ret;  	ctrl = uvc_find_control(chain, xctrl->id, &mapping); -	if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0) +	if (ctrl == NULL)  		return -EINVAL; +	if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) +		return -EACCES;  	/* Clamp out of range values. */  	switch (mapping->v4l2_type) { diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index f2ee8c6b0d8..68d59b52749 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -657,8 +657,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)  			ret = uvc_ctrl_get(chain, ctrl);  			if (ret < 0) {  				uvc_ctrl_rollback(handle); -				ctrls->error_idx = ret == -ENOENT -						 ? 
ctrls->count : i; +				ctrls->error_idx = i;  				return ret;  			}  		} @@ -686,8 +685,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)  			ret = uvc_ctrl_set(chain, ctrl);  			if (ret < 0) {  				uvc_ctrl_rollback(handle); -				ctrls->error_idx = (ret == -ENOENT && -						    cmd == VIDIOC_S_EXT_CTRLS) +				ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS  						 ? ctrls->count : i;  				return ret;  			} diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 9f81be23a81..e02c4797b1c 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -921,8 +921,10 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b  		 * In videobuf we use our internal V4l2_planes struct for  		 * single-planar buffers as well, for simplicity.  		 */ -		if (V4L2_TYPE_IS_OUTPUT(b->type)) +		if (V4L2_TYPE_IS_OUTPUT(b->type)) {  			v4l2_planes[0].bytesused = b->bytesused; +			v4l2_planes[0].data_offset = 0; +		}  		if (b->memory == V4L2_MEMORY_USERPTR) {  			v4l2_planes[0].m.userptr = b->m.userptr; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 47ad4e27087..ff553babf45 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -237,6 +237,7 @@ config MFD_TPS65910  	depends on I2C=y && GPIOLIB  	select MFD_CORE  	select REGMAP_I2C +	select REGMAP_IRQ  	select IRQ_DOMAIN  	help  	  if you say yes here you get support for the TPS65910 series of diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index e1650badd10..4778bb124ef 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -19,6 +19,7 @@  #include <linux/mfd/core.h>  #include <linux/mfd/abx500.h>  #include <linux/mfd/abx500/ab8500.h> +#include <linux/mfd/abx500/ab8500-bm.h>  #include <linux/mfd/dbx500-prcmu.h>  #include <linux/regulator/ab8500.h>  #include <linux/of.h> diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bc8a3edb6bb..222c03a5ddc 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)  		return ret;  	} -	regcache_sync(arizona->regmap); +	ret = regcache_sync(arizona->regmap); +	if (ret != 0) { +		dev_err(arizona->dev, "Failed to restore register cache\n"); +		regulator_disable(arizona->dcvdd); +		return ret; +	}  	return 0;  } diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 74713bf5371..2bec5f0db3e 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)  		aod = &wm5102_aod;  		irq = &wm5102_irq; -		switch (arizona->rev) { -		case 0: -		case 1: -			ctrlif_error = false; -			break; -		default: -			break; -		} +		ctrlif_error = false;  		break;  #endif  #ifdef CONFIG_MFD_WM5110 @@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)  		aod = &wm5110_aod;  		irq = &wm5110_irq; -		switch (arizona->rev) { -		case 0: -		case 1: -			ctrlif_error = false; -			break; -		default: -			break; -		} +		ctrlif_error = false;  		break;  #endif  	default: diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c index ac74a4d1dae..885e5678035 100644 --- a/drivers/mfd/da9052-i2c.c +++ b/drivers/mfd/da9052-i2c.c @@ -27,6 +27,66 @@  #include <linux/of_device.h>  #endif +/* I2C safe register check */ +static inline bool i2c_safe_reg(unsigned char reg) +{ +	switch (reg) { +	case DA9052_STATUS_A_REG: +	case DA9052_STATUS_B_REG: +	
case DA9052_STATUS_C_REG: +	case DA9052_STATUS_D_REG: +	case DA9052_ADC_RES_L_REG: +	case DA9052_ADC_RES_H_REG: +	case DA9052_VDD_RES_REG: +	case DA9052_ICHG_AV_REG: +	case DA9052_TBAT_RES_REG: +	case DA9052_ADCIN4_RES_REG: +	case DA9052_ADCIN5_RES_REG: +	case DA9052_ADCIN6_RES_REG: +	case DA9052_TJUNC_RES_REG: +	case DA9052_TSI_X_MSB_REG: +	case DA9052_TSI_Y_MSB_REG: +	case DA9052_TSI_LSB_REG: +	case DA9052_TSI_Z_MSB_REG: +		return true; +	default: +		return false; +	} +} + +/* + * There is an issue with DA9052 and DA9053_AA/BA/BB PMIC where the PMIC + * gets lockup up or fails to respond following a system reset. + * This fix is to follow any read or write with a dummy read to a safe + * register. + */ +int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg) +{ +	int val; + +	switch (da9052->chip_id) { +	case DA9052: +	case DA9053_AA: +	case DA9053_BA: +	case DA9053_BB: +		/* A dummy read to a safe register address. */ +	if (!i2c_safe_reg(reg)) +			return regmap_read(da9052->regmap, +					   DA9052_PARK_REGISTER, +					   &val); +		break; +	default: +		/* +		 * For other chips parking of I2C register +		 * to a safe place is not required. +		 */ +		break; +	} + +	return 0; +} +EXPORT_SYMBOL(da9052_i2c_fix); +  static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)  {  	int reg_val, ret; @@ -83,6 +143,7 @@ static int da9052_i2c_probe(struct i2c_client *client,  	da9052->dev = &client->dev;  	da9052->chip_irq = client->irq; +	da9052->fix_io = da9052_i2c_fix;  	i2c_set_clientdata(client, da9052); diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index dc8826d8d69..268f45d4239 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2524,7 +2524,7 @@ static bool read_mailbox_0(void)  		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {  			if (ev & prcmu_irq_bit[n]) -				generic_handle_irq(IRQ_PRCMU_BASE + n); +				generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));  		}  		r = true;  		break; @@ -2737,13 +2737,14 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,  }  static struct irq_domain_ops db8500_irq_ops = { -        .map    = db8500_irq_map, -        .xlate  = irq_domain_xlate_twocell, +	.map    = db8500_irq_map, +	.xlate  = irq_domain_xlate_twocell,  };  static int db8500_irq_init(struct device_node *np)  { -	int irq_base = -1; +	int irq_base = 0; +	int i;  	/* In the device tree case, just take some IRQs */  	if (!np) @@ -2758,6 +2759,10 @@ static int db8500_irq_init(struct device_node *np)  		return -ENOSYS;  	} +	/* All wakeups will be used, so create mappings for all */ +	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) +		irq_create_mapping(db8500_irq_domain, i); +  	return 0;  } diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index f6878f8db57..4d73963cd8f 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,  	if (max77686 == NULL)  		return -ENOMEM; -	max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config); -	if (IS_ERR(max77686->regmap)) { -		ret = PTR_ERR(max77686->regmap); -		dev_err(max77686->dev, "Failed to allocate register map: %d\n", -				ret); -		kfree(max77686); -		return ret; -	} -  	i2c_set_clientdata(i2c, max77686);  	max77686->dev = &i2c->dev;  	max77686->i2c = i2c; @@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,  	max77686->irq_gpio = pdata->irq_gpio;  	max77686->irq = i2c->irq; +	max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config); +	if 
(IS_ERR(max77686->regmap)) { +		ret = PTR_ERR(max77686->regmap); +		dev_err(max77686->dev, "Failed to allocate register map: %d\n", +				ret); +		kfree(max77686); +		return ret; +	} +  	if (regmap_read(max77686->regmap,  			 MAX77686_REG_DEVICE_ID, &data) < 0) {  		dev_err(max77686->dev, diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c index cc5155e2049..9e60fed5ff8 100644 --- a/drivers/mfd/max77693.c +++ b/drivers/mfd/max77693.c @@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,  	u8 reg_data;  	int ret = 0; +	if (!pdata) { +		dev_err(&i2c->dev, "No platform data found.\n"); +		return -EINVAL; +	} +  	max77693 = devm_kzalloc(&i2c->dev,  			sizeof(struct max77693_dev), GFP_KERNEL);  	if (max77693 == NULL)  		return -ENOMEM; -	max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config); -	if (IS_ERR(max77693->regmap)) { -		ret = PTR_ERR(max77693->regmap); -		dev_err(max77693->dev,"failed to allocate register map: %d\n", -				ret); -		goto err_regmap; -	} -  	i2c_set_clientdata(i2c, max77693);  	max77693->dev = &i2c->dev;  	max77693->i2c = i2c;  	max77693->irq = i2c->irq;  	max77693->type = id->driver_data; -	if (!pdata) -		goto err_regmap; +	max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config); +	if (IS_ERR(max77693->regmap)) { +		ret = PTR_ERR(max77693->regmap); +		dev_err(max77693->dev, "failed to allocate register map: %d\n", +				ret); +		return ret; +	}  	max77693->wakeup = pdata->wakeup; -	if (max77693_read_reg(max77693->regmap, -				MAX77693_PMIC_REG_PMIC_ID2, ®_data) < 0) { +	ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2, +				®_data); +	if (ret < 0) {  		dev_err(max77693->dev, "device not found on this channel\n"); -		ret = -ENODEV; -		goto err_regmap; +		return ret;  	} else  		dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); @@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,  		ret = PTR_ERR(max77693->regmap_muic);  		dev_err(max77693->dev,  			"failed to allocate register map: %d\n", ret); -		goto err_regmap; +		goto err_regmap_muic;  	}  	ret = max77693_irq_init(max77693); @@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,  err_mfd:  	max77693_irq_exit(max77693);  err_irq: +err_regmap_muic:  	i2c_unregister_device(max77693->muic);  	i2c_unregister_device(max77693->haptic); -err_regmap:  	return ret;  } diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 64803f13bce..d11567307fb 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -208,6 +208,8 @@ static int pcf50633_probe(struct i2c_client *client,  	if (!pcf)  		return -ENOMEM; +	i2c_set_clientdata(client, pcf); +	pcf->dev = &client->dev;  	pcf->pdata = pdata;  	mutex_init(&pcf->lock); @@ -219,9 +221,6 @@ static int pcf50633_probe(struct i2c_client *client,  		return ret;  	} -	i2c_set_clientdata(client, pcf); -	pcf->dev = &client->dev; -  	version = pcf50633_reg_read(pcf, 0);  	variant = pcf50633_reg_read(pcf, 1);  	if (version < 0 || variant < 0) { diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c index 89f046ca9e4..3d3b4addf81 100644 --- a/drivers/mfd/rtl8411.c +++ b/drivers/mfd/rtl8411.c @@ -112,6 +112,21 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)  			BPP_LDO_POWB, BPP_LDO_SUSPEND);  } +static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ +	u8 mask, val; + +	mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK; +	if (voltage == OUTPUT_3V3) +		val = (BPP_ASIC_3V3 << 
BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3; +	else if (voltage == OUTPUT_1V8) +		val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8; +	else +		return -EINVAL; + +	return rtsx_pci_write_register(pcr, LDO_CTL, mask, val); +} +  static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)  {  	unsigned int card_exist; @@ -163,6 +178,18 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)  	return card_exist;  } +static int rtl8411_conv_clk_and_div_n(int input, int dir) +{ +	int output; + +	if (dir == CLK_TO_DIV_N) +		output = input * 4 / 5 - 2; +	else +		output = (input + 2) * 5 / 4; + +	return output; +} +  static const struct pcr_ops rtl8411_pcr_ops = {  	.extra_init_hw = rtl8411_extra_init_hw,  	.optimize_phy = NULL, @@ -172,7 +199,9 @@ static const struct pcr_ops rtl8411_pcr_ops = {  	.disable_auto_blink = rtl8411_disable_auto_blink,  	.card_power_on = rtl8411_card_power_on,  	.card_power_off = rtl8411_card_power_off, +	.switch_output_voltage = rtl8411_switch_output_voltage,  	.cd_deglitch = rtl8411_cd_deglitch, +	.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,  };  /* SD Pull Control Enable: diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c index 283a4f14808..98fe0f39463 100644 --- a/drivers/mfd/rts5209.c +++ b/drivers/mfd/rts5209.c @@ -144,6 +144,25 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)  	return rtsx_pci_send_cmd(pcr, 100);  } +static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ +	int err; + +	if (voltage == OUTPUT_3V3) { +		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); +		if (err < 0) +			return err; +	} else if (voltage == OUTPUT_1V8) { +		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); +		if (err < 0) +			return err; +	} else { +		return -EINVAL; +	} + +	return 0; +} +  static const struct pcr_ops rts5209_pcr_ops = {  	.extra_init_hw = rts5209_extra_init_hw,  	.optimize_phy = rts5209_optimize_phy, @@ -153,7 +172,9 @@ static const struct pcr_ops rts5209_pcr_ops = {  	.disable_auto_blink = rts5209_disable_auto_blink,  	.card_power_on = rts5209_card_power_on,  	.card_power_off = rts5209_card_power_off, +	.switch_output_voltage = rts5209_switch_output_voltage,  	.cd_deglitch = NULL, +	.conv_clk_and_div_n = NULL,  };  /* SD Pull Control Enable: diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c index b9dbab266fd..29d889cbb9c 100644 --- a/drivers/mfd/rts5229.c +++ b/drivers/mfd/rts5229.c @@ -114,6 +114,25 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)  	return rtsx_pci_send_cmd(pcr, 100);  } +static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ +	int err; + +	if (voltage == OUTPUT_3V3) { +		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); +		if (err < 0) +			return err; +	} else if (voltage == OUTPUT_1V8) { +		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); +		if (err < 0) +			return err; +	} else { +		return -EINVAL; +	} + +	return 0; +} +  static const struct pcr_ops rts5229_pcr_ops = {  	.extra_init_hw = rts5229_extra_init_hw,  	.optimize_phy = rts5229_optimize_phy, @@ -123,7 +142,9 @@ static const struct pcr_ops rts5229_pcr_ops = {  	.disable_auto_blink = rts5229_disable_auto_blink,  	.card_power_on = rts5229_card_power_on,  	.card_power_off = rts5229_card_power_off, +	.switch_output_voltage = rts5229_switch_output_voltage,  	.cd_deglitch = NULL, +	.conv_clk_and_div_n = NULL,  };  /* SD Pull Control Enable: diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 
7a7b0bda461..9fc57009e22 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -630,7 +630,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,  	if (clk == pcr->cur_clock)  		return 0; -	N = (u8)(clk - 2); +	if (pcr->ops->conv_clk_and_div_n) +		N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); +	else +		N = (u8)(clk - 2);  	if ((clk <= 2) || (N > max_N))  		return -EINVAL; @@ -641,7 +644,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,  	/* Make sure that the SSC clock div_n is equal or greater than min_N */  	div = CLK_DIV_1;  	while ((N < min_N) && (div < max_div)) { -		N = (N + 2) * 2 - 2; +		if (pcr->ops->conv_clk_and_div_n) { +			int dbl_clk = pcr->ops->conv_clk_and_div_n(N, +					DIV_N_TO_CLK) * 2; +			N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, +					CLK_TO_DIV_N); +		} else { +			N = (N + 2) * 2 - 2; +		}  		div++;  	}  	dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div); @@ -703,6 +713,15 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)  }  EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ +	if (pcr->ops->switch_output_voltage) +		return pcr->ops->switch_output_voltage(pcr, voltage); + +	return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage); +  unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)  {  	unsigned int val; @@ -767,10 +786,10 @@ static void rtsx_pci_card_detect(struct work_struct *work)  	spin_unlock_irqrestore(&pcr->lock, flags); -	if (card_detect & SD_EXIST) +	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)  		pcr->slots[RTSX_SD_CARD].card_event(  				pcr->slots[RTSX_SD_CARD].p_dev); -	if (card_detect & MS_EXIST) +	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)  		pcr->slots[RTSX_MS_CARD].card_event(  				pcr->slots[RTSX_MS_CARD].p_dev);  } diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index a06d66b929b..ecc092c7f74 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)  }  static struct irq_domain_ops tc3589x_irq_ops = { -        .map    = tc3589x_irq_map, +	.map    = tc3589x_irq_map,  	.unmap  = tc3589x_irq_unmap, -        .xlate  = irq_domain_xlate_twocell, +	.xlate  = irq_domain_xlate_twocell,  };  static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)  {  	int base = tc3589x->irq_base; -	if (base) { -		tc3589x->domain = irq_domain_add_legacy( -			NULL, TC3589x_NR_INTERNAL_IRQS, base, -			0, &tc3589x_irq_ops, tc3589x); -	} -	else { -		tc3589x->domain = irq_domain_add_linear( -			np, TC3589x_NR_INTERNAL_IRQS, -			&tc3589x_irq_ops, tc3589x); -	} +	tc3589x->domain = irq_domain_add_simple( +		np, TC3589x_NR_INTERNAL_IRQS, base, +		&tc3589x_irq_ops, tc3589x);  	if (!tc3589x->domain) {  		dev_err(tc3589x->dev, "Failed to create irqdomain\n"); diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 4dae241e501..dd362c1078e 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -159,7 +159,7 @@ out:  static int twl4030_write_script(u8 address, struct twl4030_ins *script,  				       int len)  { -	int err; +	int err = -EINVAL;  	for (; len; len--, address++, script++) {  		if (len == 1) { diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c index fae15d88075..3c1723aa622 100644 --- a/drivers/mfd/vexpress-config.c +++ b/drivers/mfd/vexpress-config.c @@ -67,6 
+67,7 @@ struct vexpress_config_bridge *vexpress_config_bridge_register(  	return bridge;  } +EXPORT_SYMBOL(vexpress_config_bridge_register);  void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)  { @@ -83,6 +84,7 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)  	while (!list_empty(&__bridge.transactions))  		cpu_relax();  } +EXPORT_SYMBOL(vexpress_config_bridge_unregister);  struct vexpress_config_func { @@ -142,6 +144,7 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,  	return func;  } +EXPORT_SYMBOL(__vexpress_config_func_get);  void vexpress_config_func_put(struct vexpress_config_func *func)  { @@ -149,7 +152,7 @@ void vexpress_config_func_put(struct vexpress_config_func *func)  	of_node_put(func->bridge->node);  	kfree(func);  } - +EXPORT_SYMBOL(vexpress_config_func_put);  struct vexpress_config_trans {  	struct vexpress_config_func *func; @@ -229,6 +232,7 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,  	complete(&trans->completion);  } +EXPORT_SYMBOL(vexpress_config_complete);  int vexpress_config_wait(struct vexpress_config_trans *trans)  { @@ -236,7 +240,7 @@ int vexpress_config_wait(struct vexpress_config_trans *trans)  	return trans->status;  } - +EXPORT_SYMBOL(vexpress_config_wait);  int vexpress_config_read(struct vexpress_config_func *func, int offset,  		u32 *data) diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index e5d8f63b252..77048b18439 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)  } -void __init vexpress_sysreg_early_init(void __iomem *base) +void __init vexpress_sysreg_setup(struct device_node *node)  { -	struct device_node *node = of_find_compatible_node(NULL, NULL, -			"arm,vexpress-sysreg"); - -	if (node) -		base = of_iomap(node, 0); - -	if (WARN_ON(!base)) +	if (WARN_ON(!vexpress_sysreg_base))  		return; -	vexpress_sysreg_base = base; -  	if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)  		vexpress_master_site = VEXPRESS_SITE_DB2;  	else @@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)  	WARN_ON(!vexpress_sysreg_config_bridge);  } +void __init vexpress_sysreg_early_init(void __iomem *base) +{ +	vexpress_sysreg_base = base; +	vexpress_sysreg_setup(NULL); +} +  void __init vexpress_sysreg_of_early_init(void)  { -	vexpress_sysreg_early_init(NULL); +	struct device_node *node = of_find_compatible_node(NULL, NULL, +			"arm,vexpress-sysreg"); + +	if (node) { +		vexpress_sysreg_base = of_iomap(node, 0); +		vexpress_sysreg_setup(node); +	} else { +		pr_info("vexpress-sysreg: No Device Tree node found."); +	}  } @@ -426,9 +432,11 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)  		return -EBUSY;  	} -	if (!vexpress_sysreg_base) +	if (!vexpress_sysreg_base) {  		vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,  				resource_size(res)); +		vexpress_sysreg_setup(pdev->dev.of_node); +	}  	if (!vexpress_sysreg_base) {  		dev_err(&pdev->dev, "Failed to obtain base address!\n"); diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c index 088872ab633..1133a64c2dc 100644 --- a/drivers/mfd/wm5102-tables.c +++ b/drivers/mfd/wm5102-tables.c @@ -1882,7 +1882,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)  	}  } -#define WM5102_MAX_REGISTER 0x1a8fff +#define WM5102_MAX_REGISTER 0x1a9800  const struct 
regmap_config wm5102_spi_regmap = {  	.reg_bits = 32, diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 9ff942a346e..83269f1d16e 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)  		if (pdata->chip_enable)  			pdata->chip_enable(kim_gdata); +		/* Configure BT nShutdown to HIGH state */ +		gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); +		mdelay(5);	/* FIXME: a proper toggle */ +		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); +		mdelay(100);  		/* re-initialize the completion */  		INIT_COMPLETION(kim_gdata->ldisc_installed);  		/* send notification to UIM */ @@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)   *	(b) upon failure to either install ldisc or download firmware.   *	The function is responsible to (a) notify UIM about un-installation,   *	(b) flush UART if the ldisc was installed. - *	(c) invoke platform's chip disabling routine. + *	(c) reset BT_EN - pull down nshutdown at the end. + *	(d) invoke platform's chip disabling routine.   */  long st_kim_stop(void *kim_data)  { @@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)  		err = -ETIMEDOUT;  	} +	/* By default configure BT nShutdown to LOW state */ +	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); +	mdelay(1); +	gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); +	mdelay(1); +	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); +  	/* platform specific disable */  	if (pdata->chip_disable)  		pdata->chip_disable(kim_gdata); @@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)  	/* refer to itself */  	kim_gdata->core_data->kim_data = kim_gdata; +	/* Claim the chip enable nShutdown gpio from the system */ +	kim_gdata->nshutdown = pdata->nshutdown_gpio; +	err = gpio_request(kim_gdata->nshutdown, "kim"); +	if (unlikely(err)) { +		pr_err(" gpio %ld request failed ", kim_gdata->nshutdown); +		return err; +	} + +	/* Configure nShutdown GPIO as output=0 */ +	err = gpio_direction_output(kim_gdata->nshutdown, 0); +	if (unlikely(err)) { +		pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown); +		return err; +	}  	/* get reference of pdev for request_firmware  	 */  	kim_gdata->kim_pdev = pdev; @@ -779,10 +806,18 @@ err_core_init:  static int kim_remove(struct platform_device *pdev)  { +	/* free the GPIOs requested */ +	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;  	struct kim_data_s	*kim_gdata;  	kim_gdata = dev_get_drvdata(&pdev->dev); +	/* Free the Bluetooth/FM/GPIO +	 * nShutdown gpio from the system +	 */ +	gpio_free(pdata->nshutdown_gpio); +	pr_info("nshutdown GPIO Freed"); +  	debugfs_remove_recursive(kim_debugfs_dir);  	sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);  	pr_info("sysfs entries removed"); diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index de4c20b3936..f8dd3610294 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -50,8 +50,6 @@ struct mvsd_host {  	struct timer_list timer;  	struct mmc_host *mmc;  	struct device *dev; -	struct resource *res; -	int irq;  	struct clk *clk;  	int gpio_card_detect;  	int gpio_write_protect; @@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)  	if (!r || irq < 0 || !mvsd_data)  		return -ENXIO; -	r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); -	if (!r) -		return -EBUSY; -  	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);  	if (!mmc) {  		ret = -ENOMEM; @@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)  	host = 
mmc_priv(mmc);  	host->mmc = mmc;  	host->dev = &pdev->dev; -	host->res = r;  	host->base_clock = mvsd_data->clock / 2; +	host->clk = ERR_PTR(-EINVAL);  	mmc->ops = &mvsd_ops; @@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)  	spin_lock_init(&host->lock); -	host->base = ioremap(r->start, SZ_4K); +	host->base = devm_request_and_ioremap(&pdev->dev, r);  	if (!host->base) {  		ret = -ENOMEM;  		goto out; @@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)  	mvsd_power_down(host); -	ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); +	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);  	if (ret) {  		pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);  		goto out; -	} else -		host->irq = irq; +	}  	/* Not all platforms can gate the clock, so it is not  	   an error if the clock does not exists. */ -	host->clk = clk_get(&pdev->dev, NULL); -	if (!IS_ERR(host->clk)) { +	host->clk = devm_clk_get(&pdev->dev, NULL); +	if (!IS_ERR(host->clk))  		clk_prepare_enable(host->clk); -	}  	if (mvsd_data->gpio_card_detect) { -		ret = gpio_request(mvsd_data->gpio_card_detect, -				   DRIVER_NAME " cd"); +		ret = devm_gpio_request_one(&pdev->dev, +					    mvsd_data->gpio_card_detect, +					    GPIOF_IN, DRIVER_NAME " cd");  		if (ret == 0) { -			gpio_direction_input(mvsd_data->gpio_card_detect);  			irq = gpio_to_irq(mvsd_data->gpio_card_detect); -			ret = request_irq(irq, mvsd_card_detect_irq, -					  IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, -					  DRIVER_NAME " cd", host); +			ret = devm_request_irq(&pdev->dev, irq, +					       mvsd_card_detect_irq, +					       IRQ_TYPE_EDGE_RISING | +					       IRQ_TYPE_EDGE_FALLING, +					       DRIVER_NAME " cd", host);  			if (ret == 0)  				host->gpio_card_detect =  					mvsd_data->gpio_card_detect;  			else -				gpio_free(mvsd_data->gpio_card_detect); +				devm_gpio_free(&pdev->dev, +					       mvsd_data->gpio_card_detect);  		}  	}  	if (!host->gpio_card_detect)  		mmc->caps |= MMC_CAP_NEEDS_POLL;  	if (mvsd_data->gpio_write_protect) { -		ret = gpio_request(mvsd_data->gpio_write_protect, -				   DRIVER_NAME " wp"); +		ret = devm_gpio_request_one(&pdev->dev, +					    mvsd_data->gpio_write_protect, +					    GPIOF_IN, DRIVER_NAME " wp");  		if (ret == 0) { -			gpio_direction_input(mvsd_data->gpio_write_protect);  			host->gpio_write_protect =  				mvsd_data->gpio_write_protect;  		} @@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)  	return 0;  out: -	if (host) { -		if (host->irq) -			free_irq(host->irq, host); -		if (host->gpio_card_detect) { -			free_irq(gpio_to_irq(host->gpio_card_detect), host); -			gpio_free(host->gpio_card_detect); -		} -		if (host->gpio_write_protect) -			gpio_free(host->gpio_write_protect); -		if (host->base) -			iounmap(host->base); -	} -	if (r) -		release_resource(r); -	if (mmc) -		if (!IS_ERR_OR_NULL(host->clk)) { +	if (mmc) { +		if (!IS_ERR(host->clk))  			clk_disable_unprepare(host->clk); -			clk_put(host->clk); -		}  		mmc_free_host(mmc); +	}  	return ret;  } @@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)  {  	struct mmc_host *mmc = platform_get_drvdata(pdev); -	if (mmc) { -		struct mvsd_host *host = mmc_priv(mmc); +	struct mvsd_host *host = mmc_priv(mmc); -		if (host->gpio_card_detect) { -			free_irq(gpio_to_irq(host->gpio_card_detect), host); -			gpio_free(host->gpio_card_detect); -		} -		mmc_remove_host(mmc); -		free_irq(host->irq, host); -		if (host->gpio_write_protect) -	
		gpio_free(host->gpio_write_protect); -		del_timer_sync(&host->timer); -		mvsd_power_down(host); -		iounmap(host->base); -		release_resource(host->res); +	mmc_remove_host(mmc); +	del_timer_sync(&host->timer); +	mvsd_power_down(host); + +	if (!IS_ERR(host->clk)) +		clk_disable_unprepare(host->clk); +	mmc_free_host(mmc); -		if (!IS_ERR(host->clk)) { -			clk_disable_unprepare(host->clk); -			clk_put(host->clk); -		} -		mmc_free_host(mmc); -	}  	platform_set_drvdata(pdev, NULL);  	return 0;  } diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 571915dfb21..f74b5adca64 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -1060,26 +1060,6 @@ static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)  	return 0;  } -static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage) -{ -	struct rtsx_pcr *pcr = host->pcr; -	int err; - -	if (voltage == SD_IO_3V3) { -		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); -		if (err < 0) -			return err; -	} else if (voltage == SD_IO_1V8) { -		err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); -		if (err < 0) -			return err; -	} else { -		return -EINVAL; -	} - -	return 0; -} -  static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct realtek_pci_sdmmc *host = mmc_priv(mmc); @@ -1098,11 +1078,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)  	rtsx_pci_start_run(pcr);  	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) -		voltage = SD_IO_3V3; +		voltage = OUTPUT_3V3;  	else -		voltage = SD_IO_1V8; +		voltage = OUTPUT_1V8; -	if (voltage == SD_IO_1V8) { +	if (voltage == OUTPUT_1V8) {  		err = rtsx_pci_write_register(pcr,  				SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);  		if (err < 0) @@ -1113,11 +1093,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)  			goto out;  	} -	err = sd_change_bank_voltage(host, voltage); +	err = rtsx_pci_switch_output_voltage(pcr, voltage);  	if (err < 0)  		goto out; -	if (voltage == SD_IO_1V8) { +	if (voltage == OUTPUT_1V8) {  		err = sd_wait_voltage_stable_2(host);  		if (err < 0)  			goto out; diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 27f80cd8aef..46dcb54c32e 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -272,6 +272,7 @@ config MTD_DOCG3  	tristate "M-Systems Disk-On-Chip G3"  	select BCH  	select BCH_CONST_PARAMS +	select BITREVERSE  	---help---  	  This provides an MTD device driver for the M-Systems DiskOnChip  	  G3 devices. diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 67cc73c18dd..7901d72c924 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)  	resource_size_t res_size;  	struct mtd_part_parser_data ppdata;  	bool map_indirect; -	const char *mtd_name; +	const char *mtd_name = NULL;  	match = of_match_device(of_flash_match, &dev->dev);  	if (!match) diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c index 86c9a79b89b..595de4012e7 100644 --- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c @@ -17,8 +17,8 @@  #include "bcm47xxnflash.h"  /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has - * shown 164 retries as maxiumum. */ -#define NFLASH_READY_RETRIES		1000 + * shown ~1000 retries as maxiumum. 
*/ +#define NFLASH_READY_RETRIES		10000  #define NFLASH_SECTOR_SIZE		512 diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 3502606f648..feae55c7b88 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {  static const struct of_device_id davinci_nand_of_match[] = {  	{.compatible = "ti,davinci-nand", },  	{}, -} +};  MODULE_DEVICE_TABLE(of, davinci_nand_of_match);  static struct davinci_nand_pdata diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8323ac991ad..3766682a028 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,  	int i;  	int val; -	/* ONFI need to be probed in 8 bits mode */ -	WARN_ON(chip->options & NAND_BUSWIDTH_16); +	/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */ +	if (chip->options & NAND_BUSWIDTH_16) { +		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); +		return 0; +	}  	/* Try ONFI for unknown chip or LP */  	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);  	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1877ed7ca08..1c9e09fbdff 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,  		pr_info("%s: Setting primary slave to None.\n",  			bond->dev->name);  		bond->primary_slave = NULL; +		memset(bond->params.primary, 0, sizeof(bond->params.primary));  		bond_select_active_slave(bond);  		goto out;  	} diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 5233b8f58d7..2282b1ae976 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,  	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),  			IFX_WRITE_LOW_16BIT(mask)); + +	/* According to C_CAN documentation, the reserved bit +	 * in IFx_MASK2 register is fixed 1 +	 */  	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), -			IFX_WRITE_HIGH_16BIT(mask)); +			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));  	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),  			IFX_WRITE_LOW_16BIT(id)); @@ -960,7 +964,7 @@ static int c_can_handle_bus_err(struct net_device *dev,  		break;  	case LEC_ACK_ERROR:  		netdev_dbg(dev, "ack error\n"); -		cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | +		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |  				CAN_ERR_PROT_LOC_ACK_DEL);  		break;  	case LEC_BIT1_ERROR: @@ -973,7 +977,7 @@ static int c_can_handle_bus_err(struct net_device *dev,  		break;  	case LEC_CRC_ERROR:  		netdev_dbg(dev, "CRC error\n"); -		cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | +		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |  				CAN_ERR_PROT_LOC_CRC_DEL);  		break;  	default: diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 7d1748575b1..5c314a96197 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)  		stats->rx_errors++;  		break;  	case PCH_CRC_ERR: -		cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | +		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |  			       CAN_ERR_PROT_LOC_CRC_DEL;  		priv->can.can_stats.bus_error++;  		
stats->rx_errors++; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f898c636372..300581b24ff 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,  		}  		if (err_status & HECC_CANES_CRCE) {  			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); -			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | +			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |  					CAN_ERR_PROT_LOC_CRC_DEL;  		}  		if (err_status & HECC_CANES_ACKE) {  			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); -			cf->data[2] |= CAN_ERR_PROT_LOC_ACK | +			cf->data[3] |= CAN_ERR_PROT_LOC_ACK |  					CAN_ERR_PROT_LOC_ACK_DEL;  		}  	} diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index 66df9363808..ffd8de28a76 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)  	netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",  		    cardname, dev->base_addr, dev->irq, dev->dev_addr);  	netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", -		    8 << config & Ram_size, +		    8 << (config & Ram_size),  		    ram_split[(config & Ram_split) >> Ram_split_shift],  		    config & Autoselect ? "autoselect " : ""); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 56d3f697e0c..0035c01660b 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -21,7 +21,7 @@  #include "atl1c.h" -#define ATL1C_DRV_VERSION "1.0.1.0-NAPI" +#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"  char atl1c_driver_name[] = "atl1c";  char atl1c_driver_version[] = ATL1C_DRV_VERSION; @@ -1652,6 +1652,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)  	u16 num_alloc = 0;  	u16 rfd_next_to_use, next_next;  	struct atl1c_rx_free_desc *rfd_desc; +	dma_addr_t mapping;  	next_next = rfd_next_to_use = rfd_ring->next_to_use;  	if (++next_next == rfd_ring->count) @@ -1678,9 +1679,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)  		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);  		buffer_info->skb = skb;  		buffer_info->length = adapter->rx_buffer_len; -		buffer_info->dma = pci_map_single(pdev, vir_addr, +		mapping = pci_map_single(pdev, vir_addr,  						buffer_info->length,  						PCI_DMA_FROMDEVICE); +		if (unlikely(pci_dma_mapping_error(pdev, mapping))) { +			dev_kfree_skb(skb); +			buffer_info->skb = NULL; +			buffer_info->length = 0; +			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); +			netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); +			break; +		} +		buffer_info->dma = mapping;  		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,  			ATL1C_PCIMAP_FROMDEVICE);  		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); @@ -2015,7 +2025,29 @@ check_sum:  	return 0;  } -static void atl1c_tx_map(struct atl1c_adapter *adapter, +static void atl1c_tx_rollback(struct atl1c_adapter *adpt, +			      struct atl1c_tpd_desc *first_tpd, +			      enum atl1c_trans_queue type) +{ +	struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; +	struct atl1c_buffer *buffer_info; +	struct atl1c_tpd_desc *tpd; +	u16 first_index, index; + +	first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; +	index = first_index; +	while (index != tpd_ring->next_to_use) { +		tpd = ATL1C_TPD_DESC(tpd_ring, index); +		
buffer_info = &tpd_ring->buffer_info[index]; +		atl1c_clean_buffer(adpt->pdev, buffer_info, 0); +		memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); +		if (++index == tpd_ring->count) +			index = 0; +	} +	tpd_ring->next_to_use = first_index; +} + +static int atl1c_tx_map(struct atl1c_adapter *adapter,  		      struct sk_buff *skb, struct atl1c_tpd_desc *tpd,  			enum atl1c_trans_queue type)  { @@ -2040,7 +2072,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,  		buffer_info->length = map_len;  		buffer_info->dma = pci_map_single(adapter->pdev,  					skb->data, hdr_len, PCI_DMA_TODEVICE); -		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); +		if (unlikely(pci_dma_mapping_error(adapter->pdev, +						   buffer_info->dma))) +			goto err_dma; +  		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,  			ATL1C_PCIMAP_TODEVICE);  		mapped_len += map_len; @@ -2062,6 +2097,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,  		buffer_info->dma =  			pci_map_single(adapter->pdev, skb->data + mapped_len,  					buffer_info->length, PCI_DMA_TODEVICE); +		if (unlikely(pci_dma_mapping_error(adapter->pdev, +						   buffer_info->dma))) +			goto err_dma; +  		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);  		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,  			ATL1C_PCIMAP_TODEVICE); @@ -2083,6 +2122,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,  						    frag, 0,  						    buffer_info->length,  						    DMA_TO_DEVICE); +		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) +			goto err_dma; +  		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);  		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,  			ATL1C_PCIMAP_TODEVICE); @@ -2095,6 +2137,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,  	/* The last buffer info contain the skb address,  	   so it will be free after unmap */  	buffer_info->skb = skb; + +	return 0; + +err_dma: +	buffer_info->dma = 0; +	buffer_info->length = 0; +	return -1;  }  static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, @@ -2157,10 +2206,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,  	if (skb_network_offset(skb) != ETH_HLEN)  		tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ -	atl1c_tx_map(adapter, skb, tpd, type); -	atl1c_tx_queue(adapter, skb, tpd, type); +	if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { +		netif_info(adapter, tx_done, adapter->netdev, +			   "tx-skb droppted due to dma error\n"); +		/* roll back tpd/buffer */ +		atl1c_tx_rollback(adapter, tpd, type); +		spin_unlock_irqrestore(&adapter->tx_lock, flags); +		dev_kfree_skb(skb); +	} else { +		atl1c_tx_queue(adapter, skb, tpd, type); +		spin_unlock_irqrestore(&adapter->tx_lock, flags); +	} -	spin_unlock_irqrestore(&adapter->tx_lock, flags);  	return NETDEV_TX_OK;  } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f771ddfba64..a5edac8df67 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -504,13 +504,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,  		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,  					tpa_info->parsing_flags, len_on_bd); -		/* set for GRO */ -		if (fp->mode == TPA_MODE_GRO) -			skb_shinfo(skb)->gso_type = -			    (GET_FLAG(tpa_info->parsing_flags, -				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == -						PRS_FLAG_OVERETH_IPV6) ? 
-				SKB_GSO_TCPV6 : SKB_GSO_TCPV4; +		skb_shinfo(skb)->gso_type = +			(GET_FLAG(tpa_info->parsing_flags, +				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == +			 PRS_FLAG_OVERETH_IPV6) ? +			SKB_GSO_TCPV6 : SKB_GSO_TCPV4;  	} diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 78ea90c40e1..bdb086934cd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1283,14 +1283,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)  	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);  } -#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ -	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ -			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ -			     MII_TG3_AUXCTL_ACTL_TX_6DB) +static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) +{ +	u32 val; +	int err; -#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ -	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ -			     MII_TG3_AUXCTL_ACTL_TX_6DB); +	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + +	if (err) +		return err; +	if (enable) + +		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; +	else +		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + +	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, +				   val | MII_TG3_AUXCTL_ACTL_TX_6DB); + +	return err; +}  static int tg3_bmcr_reset(struct tg3 *tp)  { @@ -2223,7 +2235,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)  	otp = tp->phy_otp; -	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) +	if (tg3_phy_toggle_auxctl_smdsp(tp, true))  		return;  	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); @@ -2248,7 +2260,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)  	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);  	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); -	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +	tg3_phy_toggle_auxctl_smdsp(tp, false);  }  static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) @@ -2284,9 +2296,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)  	if (!tp->setlpicnt) {  		if (current_link_up == 1 && -		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { +		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {  			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); -			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +			tg3_phy_toggle_auxctl_smdsp(tp, false);  		}  		val = tr32(TG3_CPMU_EEE_MODE); @@ -2302,11 +2314,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)  	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||  	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||  	     tg3_flag(tp, 57765_CLASS)) && -	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { +	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {  		val = MII_TG3_DSP_TAP26_ALNOKO |  		      MII_TG3_DSP_TAP26_RMRXSTO;  		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); -		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +		tg3_phy_toggle_auxctl_smdsp(tp, false);  	}  	val = tr32(TG3_CPMU_EEE_MODE); @@ -2450,7 +2462,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)  		tg3_writephy(tp, MII_CTRL1000,  			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); -		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); +		err = tg3_phy_toggle_auxctl_smdsp(tp, true);  		if (err)  			return err; @@ -2471,7 +2483,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)  	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);  	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); -	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +	tg3_phy_toggle_auxctl_smdsp(tp, false);  	tg3_writephy(tp, MII_CTRL1000, phy9_orig); @@ -2572,10 +2584,10 @@ static int tg3_phy_reset(struct tg3 
*tp)  out:  	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && -	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { +	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {  		tg3_phydsp_write(tp, 0x201f, 0x2aaa);  		tg3_phydsp_write(tp, 0x000a, 0x0323); -		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +		tg3_phy_toggle_auxctl_smdsp(tp, false);  	}  	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { @@ -2584,14 +2596,14 @@ out:  	}  	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { -		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { +		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {  			tg3_phydsp_write(tp, 0x000a, 0x310b);  			tg3_phydsp_write(tp, 0x201f, 0x9506);  			tg3_phydsp_write(tp, 0x401f, 0x14e2); -			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +			tg3_phy_toggle_auxctl_smdsp(tp, false);  		}  	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { -		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { +		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {  			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);  			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {  				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); @@ -2600,7 +2612,7 @@ out:  			} else  				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); -			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +			tg3_phy_toggle_auxctl_smdsp(tp, false);  		}  	} @@ -4009,7 +4021,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)  	tw32(TG3_CPMU_EEE_MODE,  	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); -	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); +	err = tg3_phy_toggle_auxctl_smdsp(tp, true);  	if (!err) {  		u32 err2; @@ -4042,7 +4054,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)  						 MII_TG3_DSP_CH34TP2_HIBW01);  		} -		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);  		if (!err)  			err = err2;  	} @@ -6950,6 +6962,9 @@ static void tg3_poll_controller(struct net_device *dev)  	int i;  	struct tg3 *tp = netdev_priv(dev); +	if (tg3_irq_sync(tp)) +		return; +  	for (i = 0; i < tp->irq_cnt; i++)  		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);  } @@ -16367,6 +16382,7 @@ static int tg3_init_one(struct pci_dev *pdev,  	tp->pm_cap = pm_cap;  	tp->rx_mode = TG3_DEF_RX_MODE;  	tp->tx_mode = TG3_DEF_TX_MODE; +	tp->irq_sync = 1;  	if (tg3_debug > 0)  		tp->msg_enable = tg3_debug; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index a9b0830fb39..b9d4bb9530e 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)  		 * get notified when new packets arrive.  		 
*/  		macb_writel(bp, IER, MACB_RX_INT_FLAGS); + +		/* Packets received while interrupts were disabled */ +		status = macb_readl(bp, RSR); +		if (unlikely(status)) +			napi_reschedule(napi);  	}  	/* TODO: Handle errors */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index b407043ce9b..f7f02900f65 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -548,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)  		return -1;  	} +	/* All frames should fit into a single buffer */ +	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) +		return -1; +  	/* Check if packet has checksum already */  	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&  		!(ext_status & RXDESC_IP_PAYLOAD_MASK)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f0718e1a836..c306df7d456 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1994,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)  {  	const struct port_info *pi = netdev_priv(dev);  	struct adapter *adap = pi->adapter; +	struct sge_rspq *q; +	int i; +	int r = 0; -	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, -			c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) { +		q = &adap->sge.ethrxq[i].rspq; +		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs, +			c->rx_max_coalesced_frames); +		if (r) { +			dev_err(&dev->dev, "failed to set coalesce %d\n", r); +			break; +		} +	} +	return r;  }  static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4eba17b83ba..f1b3df167ff 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -36,13 +36,13 @@  #define DRV_VER			"4.4.161.0u"  #define DRV_NAME		"be2net" -#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC" -#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC" -#define OC_NAME			"Emulex OneConnect 10Gbps NIC" +#define BE_NAME			"Emulex BladeEngine2" +#define BE3_NAME		"Emulex BladeEngine3" +#define OC_NAME			"Emulex OneConnect"  #define OC_NAME_BE		OC_NAME	"(be3)"  #define OC_NAME_LANCER		OC_NAME "(Lancer)"  #define OC_NAME_SH		OC_NAME "(Skyhawk)" -#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver" +#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"  #define BE_VENDOR_ID 		0x19a2  #define EMULEX_VENDOR_ID	0x10df diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5c995700e53..4d6f3c54427 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -25,7 +25,7 @@  MODULE_VERSION(DRV_VER);  MODULE_DEVICE_TABLE(pci, be_dev_ids);  MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); -MODULE_AUTHOR("ServerEngines Corporation"); +MODULE_AUTHOR("Emulex Corporation");  MODULE_LICENSE("GPL");  static unsigned int num_vfs; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 02a12b69555..4dab6fc265a 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -232,6 +232,7 @@  #define E1000_CTRL_FRCDPX   0x00001000  /* Force 
Duplex */  #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */  #define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE     0x00080000  /* Memory Error Handling Enable */  #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */  #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */  #define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */ @@ -389,6 +390,12 @@  #define E1000_PBS_16K E1000_PBA_16K +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT	8 +#define E1000_PBECCSTS_ECC_ENABLE		0x00010000 +  #define IFS_MAX       80  #define IFS_MIN       40  #define IFS_RATIO     4 @@ -408,6 +415,7 @@  #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */  #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */  #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */  #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */  #define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */  #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */ @@ -443,6 +451,7 @@  #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */  #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */  #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */ +#define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */  #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */  #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */  #define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6782a2eea1b..7e95f221d60 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -309,6 +309,8 @@ struct e1000_adapter {  	struct napi_struct napi; +	unsigned int uncorr_errors;	/* uncorrectable ECC errors */ +	unsigned int corr_errors;	/* correctable ECC errors */  	unsigned int restart_queue;  	u32 txd_cmd; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f95bc6ee1c2..fd4772a2691 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {  	E1000_STAT("dropped_smbus", stats.mgpdc),  	E1000_STAT("rx_dma_failed", rx_dma_failed),  	E1000_STAT("tx_dma_failed", tx_dma_failed), +	E1000_STAT("uncorr_ecc_errors", uncorr_errors), +	E1000_STAT("corr_ecc_errors", corr_errors),  };  #define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index cf217777586..b88676ff3d8 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -77,6 +77,7 @@ enum e1e_registers {  #define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */  	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */  	E1000_PBS      = 0x01008, /* Packet Buffer Size */ +	E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */  	E1000_EEMNGCTL = 
0x01010, /* MNG EEprom Control */  	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */  	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 97633654760..24d9f61956f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)  	if (hw->mac.type == e1000_ich8lan)  		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);  	ew32(RFCTL, reg); + +	/* Enable ECC on Lynxpoint */ +	if (hw->mac.type == e1000_pch_lpt) { +		reg = er32(PBECCSTS); +		reg |= E1000_PBECCSTS_ECC_ENABLE; +		ew32(PBECCSTS, reg); + +		reg = er32(CTRL); +		reg |= E1000_CTRL_MEHE; +		ew32(CTRL, reg); +	}  }  /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fbf75fdca99..643c883dd79 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} +	/* Reset on uncorrectable ECC error */ +	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { +		u32 pbeccsts = er32(PBECCSTS); + +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + +		/* Do the reset outside of interrupt context */ +		schedule_work(&adapter->reset_task); + +		/* return immediately since reset is imminent */ +		return IRQ_HANDLED; +	} +  	if (napi_schedule_prep(&adapter->napi)) {  		adapter->total_tx_bytes = 0;  		adapter->total_tx_packets = 0; @@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)  			mod_timer(&adapter->watchdog_timer, jiffies + 1);  	} +	/* Reset on uncorrectable ECC error */ +	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { +		u32 pbeccsts = er32(PBECCSTS); + +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + +		/* Do the reset outside of interrupt context */ +		schedule_work(&adapter->reset_task); + +		/* return immediately since reset is imminent */ +		return IRQ_HANDLED; +	} +  	if (napi_schedule_prep(&adapter->napi)) {  		adapter->total_tx_bytes = 0;  		adapter->total_tx_packets = 0; @@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)  	if (adapter->msix_entries) {  		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);  		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); +	} else if (hw->mac.type == e1000_pch_lpt) { +		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);  	} else {  		ew32(IMS, IMS_ENABLE_MASK);  	} @@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)  	adapter->stats.mgptc += er32(MGTPTC);  	adapter->stats.mgprc += er32(MGTPRC);  	adapter->stats.mgpdc += er32(MGTPDC); + +	/* Correctable ECC Errors */ +	if (hw->mac.type == e1000_pch_lpt) { +		u32 pbeccsts = er32(PBECCSTS); +		adapter->corr_errors += +		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; +		adapter->uncorr_errors += +		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> +		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; +	}  }  /** 
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index f3a632bf8d9..687c83d1bda 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -32,7 +32,7 @@  obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \                ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \                ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o @@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \                                ixgbe_dcb_82599.o ixgbe_dcb_nl.o  ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o +ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o  ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 50aa546b8c7..3504686d3af 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -24,9 +24,6 @@    Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497  *******************************************************************************/ - -#ifdef CONFIG_DEBUG_FS -  #include <linux/debugfs.h>  #include <linux/module.h> @@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)  {  	debugfs_remove_recursive(ixgbe_dbg_root);  } - -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 20a5af6d87d..b3e3294cfe5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1401,6 +1401,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,  	/* set gso_size to avoid messing up TCP MSS */  	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),  						 IXGBE_CB(skb)->append_cnt); +	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;  }  static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 1a751c9d09c..bb9256a1b0a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -660,11 +660,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,  		break;  	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:  		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; -		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; +		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;  		break;  	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:  		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; -		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; +		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;  		break;  	case HWTSTAMP_FILTER_PTP_V2_EVENT:  	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 2b799f4f1c3..6771b69f40d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -630,10 +630,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)  		ring->tx_csum++;  	} -	/* Copy dst mac address to wqe */ -	ethh = (struct ethhdr *)skb->data; -	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); -	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); +	if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { +		/* Copy dst mac address to wqe. 
This allows loopback in eSwitch, +		 * so that VFs and PF can communicate with each other +		 */ +		ethh = (struct ethhdr *)skb->data; +		tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); +		tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); +	} +  	/* Handle LSO (TSO) packets */  	if (lso_header_size) {  		/* Mark opcode as LSO */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e1bafffbc3b..5163af31499 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)  		}  	} -	if ((dev_cap->flags & +	if ((dev->caps.flags &  	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&  	    mlx4_is_master(dev))  		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; @@ -1790,15 +1790,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)  	int i;  	if (msi_x) { -		/* In multifunction mode each function gets 2 msi-X vectors -		 * one for data path completions anf the other for asynch events -		 * or command completions */ -		if (mlx4_is_mfunc(dev)) { -			nreq = 2; -		} else { -			nreq = min_t(int, dev->caps.num_eqs - -				     dev->caps.reserved_eqs, nreq); -		} +		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, +			     nreq);  		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);  		if (!entries) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index bc165f4d0f6..695667d471a 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)  					 buffrag->length, PCI_DMA_TODEVICE);  			buffrag->dma = 0ULL;  		} -		for (j = 0; j < cmd_buf->frag_count; j++) { +		for (j = 1; j < cmd_buf->frag_count; j++) {  			buffrag++;  			if (buffrag->dma) {  				pci_unmap_page(adapter->pdev, buffrag->dma, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 6098fd4adfe..69e321a6507 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1963,10 +1963,12 @@ unwind:  	while (--i >= 0) {  		nf = &pbuf->frag_array[i+1];  		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); +		nf->dma = 0ULL;  	}  	nf = &pbuf->frag_array[0];  	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); +	nf->dma = 0ULL;  out_err:  	return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 6f82812d0fa..09aa310b619 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -986,8 +986,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,  	th->seq = htonl(seq_number);  	length = skb->len; -	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) +	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {  		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); +		if (skb->protocol == htons(ETH_P_IPV6)) +			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; +		else +			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +	}  	if (vid != 0xffff)  		__vlan_hwaccel_put_tag(skb, vid); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ed96f309bca..998974f7874 100644 --- 
a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -450,7 +450,6 @@ enum rtl8168_registers {  #define PWM_EN				(1 << 22)  #define RXDV_GATED_EN			(1 << 19)  #define EARLY_TALLY_EN			(1 << 16) -#define FORCE_CLK			(1 << 15) /* force clock request */  };  enum rtl_register_content { @@ -514,7 +513,6 @@ enum rtl_register_content {  	PMEnable	= (1 << 0),	/* Power Management Enable */  	/* Config2 register p. 25 */ -	ClkReqEn	= (1 << 7),	/* Clock Request Enable */  	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */  	PCI_Clock_66MHz = 0x01,  	PCI_Clock_33MHz = 0x00, @@ -535,7 +533,6 @@ enum rtl_register_content {  	Spi_en		= (1 << 3),  	LanWake		= (1 << 1),	/* LanWake enable/disable */  	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */ -	ASPM_en		= (1 << 0),	/* ASPM enable */  	/* TBICSR p.28 */  	TBIReset	= 0x80000000, @@ -684,7 +681,6 @@ enum features {  	RTL_FEATURE_WOL		= (1 << 0),  	RTL_FEATURE_MSI		= (1 << 1),  	RTL_FEATURE_GMII	= (1 << 2), -	RTL_FEATURE_FW_LOADED	= (1 << 3),  };  struct rtl8169_counters { @@ -1826,8 +1822,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)  	if (opts2 & RxVlanTag)  		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); - -	desc->opts2 = 0;  }  static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) @@ -2391,10 +2385,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)  	struct rtl_fw *rtl_fw = tp->rtl_fw;  	/* TODO: release firmware once rtl_phy_write_fw signals failures. */ -	if (!IS_ERR_OR_NULL(rtl_fw)) { +	if (!IS_ERR_OR_NULL(rtl_fw))  		rtl_phy_write_fw(tp, rtl_fw); -		tp->features |= RTL_FEATURE_FW_LOADED; -	}  }  static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) @@ -2405,31 +2397,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)  		rtl_apply_firmware(tp);  } -static void r810x_aldps_disable(struct rtl8169_private *tp) -{ -	rtl_writephy(tp, 0x1f, 0x0000); -	rtl_writephy(tp, 0x18, 0x0310); -	msleep(100); -} - -static void r810x_aldps_enable(struct rtl8169_private *tp) -{ -	if (!(tp->features & RTL_FEATURE_FW_LOADED)) -		return; - -	rtl_writephy(tp, 0x1f, 0x0000); -	rtl_writephy(tp, 0x18, 0x8310); -} - -static void r8168_aldps_enable_1(struct rtl8169_private *tp) -{ -	if (!(tp->features & RTL_FEATURE_FW_LOADED)) -		return; - -	rtl_writephy(tp, 0x1f, 0x0000); -	rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000); -} -  static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)  {  	static const struct phy_reg phy_reg_init[] = { @@ -3220,8 +3187,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)  	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);  	rtl_writephy(tp, 0x1f, 0x0000); -	r8168_aldps_enable_1(tp); -  	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. 
*/  	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);  } @@ -3296,8 +3261,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)  	rtl_writephy(tp, 0x05, 0x8b85);  	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);  	rtl_writephy(tp, 0x1f, 0x0000); - -	r8168_aldps_enable_1(tp);  }  static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) @@ -3305,8 +3268,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)  	rtl_apply_firmware(tp);  	rtl8168f_hw_phy_config(tp); - -	r8168_aldps_enable_1(tp);  }  static void rtl8411_hw_phy_config(struct rtl8169_private *tp) @@ -3404,8 +3365,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)  	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);  	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);  	rtl_writephy(tp, 0x1f, 0x0000); - -	r8168_aldps_enable_1(tp);  }  static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) @@ -3491,19 +3450,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)  	};  	/* Disable ALDPS before ram code */ -	r810x_aldps_disable(tp); +	rtl_writephy(tp, 0x1f, 0x0000); +	rtl_writephy(tp, 0x18, 0x0310); +	msleep(100);  	rtl_apply_firmware(tp);  	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - -	r810x_aldps_enable(tp);  }  static void rtl8402_hw_phy_config(struct rtl8169_private *tp)  {  	/* Disable ALDPS before setting firmware */ -	r810x_aldps_disable(tp); +	rtl_writephy(tp, 0x1f, 0x0000); +	rtl_writephy(tp, 0x18, 0x0310); +	msleep(20);  	rtl_apply_firmware(tp); @@ -3513,8 +3474,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)  	rtl_writephy(tp, 0x10, 0x401f);  	rtl_writephy(tp, 0x19, 0x7030);  	rtl_writephy(tp, 0x1f, 0x0000); - -	r810x_aldps_enable(tp);  }  static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) @@ -3527,7 +3486,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)  	};  	/* Disable ALDPS before ram code */ -	r810x_aldps_disable(tp); +	rtl_writephy(tp, 0x1f, 0x0000); +	rtl_writephy(tp, 0x18, 0x0310); +	msleep(100);  	rtl_apply_firmware(tp); @@ -3535,8 +3496,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)  	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));  	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - -	r810x_aldps_enable(tp);  }  static void rtl_hw_phy_config(struct net_device *dev) @@ -5053,6 +5012,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)  	RTL_W8(MaxTxPacketSize, EarlySize); +	rtl_disable_clock_request(pdev); +  	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);  	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); @@ -5061,8 +5022,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)  	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);  	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); -	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); +	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);  }  static void rtl_hw_start_8168f(struct rtl8169_private *tp) @@ -5087,12 +5047,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)  	RTL_W8(MaxTxPacketSize, EarlySize); +	rtl_disable_clock_request(pdev); +  	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);  	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);  	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); -	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK); -	RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); +	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); +	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);  }  static void 
rtl_hw_start_8168f_1(struct rtl8169_private *tp) @@ -5149,10 +5110,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)  	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);  	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); -	RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN); +	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);  	RTL_W8(MaxTxPacketSize, EarlySize); -	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);  	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);  	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@ -5368,9 +5327,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)  	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);  	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); -	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); -	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);  	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));  } @@ -5396,9 +5352,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)  	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);  	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); -	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); -	RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);  	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); @@ -5420,10 +5373,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)  	/* Force LAN exit from ASPM if Rx/Tx are not idle */  	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); -	RTL_W32(MISC, -		(RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN); -	RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); -	RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); +	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);  	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);  	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);  } @@ -6064,8 +6014,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget  			    !(status & (RxRWT | RxFOVF)) &&  			    (dev->features & NETIF_F_RXALL))  				goto process_pkt; - -			rtl8169_mark_to_asic(desc, rx_buf_sz);  		} else {  			struct sk_buff *skb;  			dma_addr_t addr; @@ -6086,16 +6034,14 @@ process_pkt:  			if (unlikely(rtl8169_fragmented_frame(status))) {  				dev->stats.rx_dropped++;  				dev->stats.rx_length_errors++; -				rtl8169_mark_to_asic(desc, rx_buf_sz); -				continue; +				goto release_descriptor;  			}  			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],  						  tp, pkt_size, addr); -			rtl8169_mark_to_asic(desc, rx_buf_sz);  			if (!skb) {  				dev->stats.rx_dropped++; -				continue; +				goto release_descriptor;  			}  			rtl8169_rx_csum(skb, status); @@ -6111,13 +6057,10 @@ process_pkt:  			tp->rx_stats.bytes += pkt_size;  			u64_stats_update_end(&tp->rx_stats.syncp);  		} - -		/* Work around for AMD plateform. 
*/ -		if ((desc->opts2 & cpu_to_le32(0xfffe000)) && -		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) { -			desc->opts2 = 0; -			cur_rx++; -		} +release_descriptor: +		desc->opts2 = 0; +		wmb(); +		rtl8169_mark_to_asic(desc, rx_buf_sz);  	}  	count = cur_rx - tp->cur_rx; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f07c0612abf..b75f4b28689 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -69,7 +69,7 @@  #undef STMMAC_XMIT_DEBUG  /*#define STMMAC_XMIT_DEBUG*/ -#ifdef STMMAC_TX_DEBUG +#ifdef STMMAC_XMIT_DEBUG  #define TX_DBG(fmt, args...)  printk(fmt, ## args)  #else  #define TX_DBG(fmt, args...)  do { } while (0) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0376a5e6b2b..0b9829fe3ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)  		goto bus_register_fail;  	} -	priv->mii = new_bus; -  	found = 0;  	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {  		struct phy_device *phydev = new_bus->phy_map[addr]; @@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)  		}  	} -	if (!found) +	if (!found) {  		pr_warning("%s: No PHY found\n", ndev->name); +		mdiobus_unregister(new_bus); +		mdiobus_free(new_bus); +		return -ENODEV; +	} + +	priv->mii = new_bus;  	return 0; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 7992b3e05d3..78ace59efd2 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)  					 rp->tx_skbuff[entry]->len,  					 PCI_DMA_TODEVICE);  		} -		dev_kfree_skb_irq(rp->tx_skbuff[entry]); +		dev_kfree_skb(rp->tx_skbuff[entry]);  		rp->tx_skbuff[entry] = NULL;  		entry = (++rp->dirty_tx) % TX_RING_SIZE;  	} @@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)  	if (intr_status & IntrPCIErr)  		netif_warn(rp, hw, dev, "PCI error\n"); -	napi_disable(&rp->napi); -	rhine_irq_disable(rp); -	/* Slow and safe. Consider __napi_schedule as a replacement ? 
*/ -	napi_enable(&rp->napi); -	napi_schedule(&rp->napi); +	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);  out_unlock:  	mutex_unlock(&rp->task_lock); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5fd6f467432..e6fe0d80d61 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -84,7 +84,7 @@ struct hv_netvsc_packet {  };  struct netvsc_device_info { -	unsigned char mac_adr[6]; +	unsigned char mac_adr[ETH_ALEN];  	bool link_state;	/* 0 - link up, 1 - link down */  	int  ring_size;  }; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f825a629a69..8264f0ef769 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)  	struct net_device_context *ndevctx = netdev_priv(ndev);  	struct hv_device *hdev =  ndevctx->device_ctx;  	struct sockaddr *addr = p; -	char save_adr[14]; +	char save_adr[ETH_ALEN];  	unsigned char save_aatype;  	int err; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 81f8f9e31db..fcbf680c3e6 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,  	skb_orphan(skb); +	/* Before queueing this packet to netif_rx(), +	 * make sure dst is refcounted. +	 */ +	skb_dst_force(skb); +  	skb->protocol = eth_type_trans(skb, dev);  	/* it's OK to use per_cpu_ptr() because BHs are off */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 68a43fe602e..d3fb97d97cb 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,  static size_t macvlan_get_size(const struct net_device *dev)  { -	return nla_total_size(4); +	return (0 +		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */ +		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ +		);  }  static int macvlan_fill_info(struct sk_buff *skb, diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index d5199cb4cae..b5ddd5077a8 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");  /* IP101A/G - IP1001 */  #define IP10XX_SPEC_CTRL_STATUS		16	/* Spec. Control Register */ +#define IP1001_RXPHASE_SEL		(1<<0)	/* Add delay on RX_CLK */ +#define IP1001_TXPHASE_SEL		(1<<1)	/* Add delay on TX_CLK */  #define IP1001_SPEC_CTRL_STATUS_2	20	/* IP1001 Spec. 
Control Reg 2 */ -#define IP1001_PHASE_SEL_MASK		3	/* IP1001 RX/TXPHASE_SEL */  #define IP1001_APS_ON			11	/* IP1001 APS Mode  bit */  #define IP101A_G_APS_ON			2	/* IP101A/G APS Mode bit */  #define IP101A_G_IRQ_CONF_STATUS	0x11	/* Conf Info IRQ & Status Reg */ @@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)  	if (c < 0)  		return c; -	/* INTR pin used: speed/link/duplex will cause an interrupt */ -	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); -	if (c < 0) -		return c; +	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { -	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { -		/* Additional delay (2ns) used to adjust RX clock phase -		 * at RGMII interface */  		c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);  		if (c < 0)  			return c; -		c |= IP1001_PHASE_SEL_MASK; +		c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); + +		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) +			c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); +		else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) +			c |= IP1001_RXPHASE_SEL; +		else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) +			c |= IP1001_TXPHASE_SEL; +  		c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);  		if (c < 0)  			return c; @@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)  	if (c < 0)  		return c; +	/* INTR pin used: speed/link/duplex will cause an interrupt */ +	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); +	if (c < 0) +		return c; +  	/* Enable Auto Power Saving mode */  	c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);  	c |= IP101A_G_APS_ON; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5d2a3f21588..22dec9c7ef0 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)  	int err;  	int temp; -	/* Enable Fiber/Copper auto selection */ -	temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); -	temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO; -	phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); - -	temp = phy_read(phydev, MII_BMCR); -	temp |= BMCR_RESET; -	phy_write(phydev, MII_BMCR, temp); -  	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||  	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||  	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || diff --git a/drivers/net/tun.c b/drivers/net/tun.c index af372d0957f..2917a86f4c4 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -109,11 +109,11 @@ struct tap_filter {  	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];  }; -/* 1024 is probably a high enough limit: modern hypervisors seem to support on - * the order of 100-200 CPUs so this leaves us some breathing space if we want - * to match a queue per guest CPU. - */ -#define MAX_TAP_QUEUES 1024 +/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for + * the netdevice fit in one page, keeping the allocation reliable. + * TODO: increase the limit.
*/ +#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES +#define MAX_TAP_FLOWS  4096  #define TUN_FLOW_EXPIRE (3 * HZ) @@ -185,6 +185,8 @@ struct tun_struct {  	unsigned long ageing_time;  	unsigned int numdisabled;  	struct list_head disabled; +	void *security; +	u32 flow_count;  };  static inline u32 tun_hashfn(u32 rxhash) @@ -218,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,  		e->queue_index = queue_index;  		e->tun = tun;  		hlist_add_head_rcu(&e->hash_link, head); +		++tun->flow_count;  	}  	return e;  } @@ -228,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)  		  e->rxhash, e->queue_index);  	hlist_del_rcu(&e->hash_link);  	kfree_rcu(e, rcu); +	--tun->flow_count;  }  static void tun_flow_flush(struct tun_struct *tun) @@ -294,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)  }  static void tun_flow_update(struct tun_struct *tun, u32 rxhash, -			    u16 queue_index) +			    struct tun_file *tfile)  {  	struct hlist_head *head;  	struct tun_flow_entry *e;  	unsigned long delay = tun->ageing_time; +	u16 queue_index = tfile->queue_index;  	if (!rxhash)  		return; @@ -307,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,  	rcu_read_lock(); -	if (tun->numqueues == 1) +	/* We may get a very small possibility of OOO during switching, not +	 * worth to optimize.*/ +	if (tun->numqueues == 1 || tfile->detached)  		goto unlock;  	e = tun_flow_find(head, rxhash); @@ -317,7 +324,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,  		e->updated = jiffies;  	} else {  		spin_lock_bh(&tun->lock); -		if (!tun_flow_find(head, rxhash)) +		if (!tun_flow_find(head, rxhash) && +		    tun->flow_count < MAX_TAP_FLOWS)  			tun_flow_create(tun, head, rxhash, queue_index);  		if (!timer_pending(&tun->flow_gc_timer)) @@ -406,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)  	tun = rtnl_dereference(tfile->tun); -	if (tun) { +	if (tun && !tfile->detached) {  		u16 index = tfile->queue_index;  		BUG_ON(index >= tun->numqueues);  		dev = tun->dev;  		rcu_assign_pointer(tun->tfiles[index],  				   tun->tfiles[tun->numqueues - 1]); -		rcu_assign_pointer(tfile->tun, NULL);  		ntfile = rtnl_dereference(tun->tfiles[index]);  		ntfile->queue_index = index;  		--tun->numqueues; -		if (clean) +		if (clean) { +			rcu_assign_pointer(tfile->tun, NULL);  			sock_put(&tfile->sk); -		else +		} else  			tun_disable_queue(tun, tfile);  		synchronize_net(); @@ -434,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)  	}  	if (clean) { -		if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && -		    !(tun->flags & TUN_PERSIST)) -			if (tun->dev->reg_state == NETREG_REGISTERED) +		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { +			netif_carrier_off(tun->dev); + +			if (!(tun->flags & TUN_PERSIST) && +			    tun->dev->reg_state == NETREG_REGISTERED)  				unregister_netdevice(tun->dev); +		}  		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,  				 &tfile->socket.flags)); @@ -465,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)  		rcu_assign_pointer(tfile->tun, NULL);  		--tun->numqueues;  	} +	list_for_each_entry(tfile, &tun->disabled, next) { +		wake_up_all(&tfile->wq.wait); +		rcu_assign_pointer(tfile->tun, NULL); +	}  	BUG_ON(tun->numqueues != 0);  	synchronize_net(); @@ -490,8 +505,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file)  	struct tun_file *tfile = file->private_data;  	int err; +	err = 
security_tun_dev_attach(tfile->socket.sk, tun->security); +	if (err < 0) +		goto out; +  	err = -EINVAL; -	if (rtnl_dereference(tfile->tun)) +	if (rtnl_dereference(tfile->tun) && !tfile->detached)  		goto out;  	err = -EBUSY; @@ -1190,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,  	tun->dev->stats.rx_packets++;  	tun->dev->stats.rx_bytes += len; -	tun_flow_update(tun, rxhash, tfile->queue_index); +	tun_flow_update(tun, rxhash, tfile);  	return total_len;  } @@ -1373,6 +1392,7 @@ static void tun_free_netdev(struct net_device *dev)  	BUG_ON(!(list_empty(&tun->disabled)));  	tun_flow_uninit(tun); +	security_tun_dev_free_security(tun->security);  	free_netdev(dev);  } @@ -1562,7 +1582,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  		if (tun_not_capable(tun))  			return -EPERM; -		err = security_tun_dev_attach(tfile->socket.sk); +		err = security_tun_dev_open(tun->security);  		if (err < 0)  			return err; @@ -1577,6 +1597,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  	else {  		char *name;  		unsigned long flags = 0; +		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? +			     MAX_TAP_QUEUES : 1;  		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			return -EPERM; @@ -1600,8 +1622,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  			name = ifr->ifr_name;  		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, -				       tun_setup, -				       MAX_TAP_QUEUES, MAX_TAP_QUEUES); +				       tun_setup, queues, queues); +  		if (!dev)  			return -ENOMEM; @@ -1619,7 +1641,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  		spin_lock_init(&tun->lock); -		security_tun_dev_post_create(&tfile->sk); +		err = security_tun_dev_alloc_security(&tun->security); +		if (err < 0) +			goto err_free_dev;  		tun_net_init(dev); @@ -1644,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)  		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||  		    device_create_file(&tun->dev->dev, &dev_attr_group))  			pr_err("Failed to create tun sysfs files\n"); - -		netif_carrier_on(tun->dev);  	} +	netif_carrier_on(tun->dev); +  	tun_debug(KERN_INFO, tun, "tun_set_iff\n");  	if (ifr->ifr_flags & IFF_NO_PI) @@ -1789,19 +1813,24 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)  	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {  		tun = tfile->detached; -		if (!tun) +		if (!tun) {  			ret = -EINVAL; -		else -			ret = tun_attach(tun, file); +			goto unlock; +		} +		ret = security_tun_dev_attach_queue(tun->security); +		if (ret < 0) +			goto unlock; +		ret = tun_attach(tun, file);  	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {  		tun = rtnl_dereference(tfile->tun); -		if (!tun || !(tun->flags & TUN_TAP_MQ)) +		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)  			ret = -EINVAL;  		else  			__tun_detach(tfile, false);  	} else  		ret = -EINVAL; +unlock:  	rtnl_unlock();  	return ret;  } diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 42f51c71ec1..248d2dc765a 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -374,6 +374,21 @@ static const struct driver_info cdc_mbim_info = {  	.tx_fixup = cdc_mbim_tx_fixup,  }; +/* MBIM and NCM devices should not need a ZLP after NTBs with + * dwNtbOutMaxSize length. 
This driver_info is for the exceptional + * devices requiring it anyway, allowing them to be supported without + * forcing the performance penalty on all the sane devices. + */ +static const struct driver_info cdc_mbim_info_zlp = { +	.description = "CDC MBIM", +	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, +	.bind = cdc_mbim_bind, +	.unbind = cdc_mbim_unbind, +	.manage_power = cdc_mbim_manage_power, +	.rx_fixup = cdc_mbim_rx_fixup, +	.tx_fixup = cdc_mbim_tx_fixup, +}; +  static const struct usb_device_id mbim_devs[] = {  	/* This duplicate NCM entry is intentional. MBIM devices can  	 * be disguised as NCM by default, and this is necessary to @@ -385,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {  	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),  	  .driver_info = (unsigned long)&cdc_mbim_info,  	}, +	/* Sierra Wireless MC7710 need ZLPs */ +	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), +	  .driver_info = (unsigned long)&cdc_mbim_info_zlp, +	},  	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),  	  .driver_info = (unsigned long)&cdc_mbim_info,  	}, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 71b6e92b8e9..00d3b2d3782 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -435,6 +435,13 @@ advance:  		len -= temp;  	} +	/* some buggy devices have an IAD but no CDC Union */ +	if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { +		ctx->control = intf; +		ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); +		dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); +	} +  	/* check if we got everything */  	if ((ctx->control == NULL) || (ctx->data == NULL) ||  	    ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf)))) @@ -497,7 +504,8 @@ advance:  error2:  	usb_set_intfdata(ctx->control, NULL);  	usb_set_intfdata(ctx->data, NULL); -	usb_driver_release_interface(driver, ctx->data); +	if (ctx->data != ctx->control) +		usb_driver_release_interface(driver, ctx->data);  error:  	cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);  	dev->data[0] = 0; @@ -1155,6 +1163,20 @@ static const struct driver_info wwan_info = {  	.tx_fixup = cdc_ncm_tx_fixup,  }; +/* Same as wwan_info, but with FLAG_NOARP  */ +static const struct driver_info wwan_noarp_info = { +	.description = "Mobile Broadband Network Device (NO ARP)", +	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET +			| FLAG_WWAN | FLAG_NOARP, +	.bind = cdc_ncm_bind, +	.unbind = cdc_ncm_unbind, +	.check_connect = cdc_ncm_check_connect, +	.manage_power = usbnet_manage_power, +	.status = cdc_ncm_status, +	.rx_fixup = cdc_ncm_rx_fixup, +	.tx_fixup = cdc_ncm_tx_fixup, +}; +  static const struct usb_device_id cdc_devs[] = {  	/* Ericsson MBM devices like F5521gw */  	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -1193,6 +1215,16 @@ static const struct usb_device_id cdc_devs[] = {  	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),  	  .driver_info = (unsigned long)&wwan_info,  	}, +	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), +	  .driver_info = (unsigned long)&wwan_info, +	}, + +	/* Infineon(now Intel) HSPA Modem platform */ +	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, +		USB_CLASS_COMM, +		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), +	  .driver_info = (unsigned long)&wwan_noarp_info, +	},  	/* 
Generic CDC-NCM devices */  	{ USB_INTERFACE_INFO(USB_CLASS_COMM, diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 3f554c1149f..d7e99445518 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -45,6 +45,12 @@  #define DM_MCAST_ADDR	0x16	/* 8 bytes */  #define DM_GPR_CTRL	0x1e  #define DM_GPR_DATA	0x1f +#define DM_CHIP_ID	0x2c +#define DM_MODE_CTRL	0x91	/* only on dm9620 */ + +/* chip id values */ +#define ID_DM9601	0 +#define ID_DM9620	1  #define DM_MAX_MCAST	64  #define DM_MCAST_SIZE	8 @@ -53,7 +59,6 @@  #define DM_RX_OVERHEAD	7	/* 3 byte header + 4 byte crc tail */  #define DM_TIMEOUT	1000 -  static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)  {  	int err; @@ -84,32 +89,23 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)  static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)  { -	return usbnet_write_cmd(dev, DM_WRITE_REGS, +	return usbnet_write_cmd(dev, DM_WRITE_REG,  				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,  				value, reg, NULL, 0);  } -static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value, -				  u16 length, void *data) +static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)  {  	usbnet_write_cmd_async(dev, DM_WRITE_REGS,  			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, -			       value, reg, data, length); -} - -static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) -{ -	netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length); - -	dm_write_async_helper(dev, reg, 0, length, data); +			       0, reg, data, length);  }  static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)  { -	netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n", -		   reg, value); - -	dm_write_async_helper(dev, reg, value, 0, NULL); +	usbnet_write_cmd_async(dev, DM_WRITE_REG, +			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +			       value, reg, NULL, 0);  }  static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value) @@ -358,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {  static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)  {  	int ret; -	u8 mac[ETH_ALEN]; +	u8 mac[ETH_ALEN], id;  	ret = usbnet_get_endpoints(dev, intf);  	if (ret) @@ -399,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)  		__dm9601_set_mac_address(dev);  	} +	if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) { +		netdev_err(dev->net, "Error reading chip ID\n"); +		ret = -ENODEV; +		goto out; +	} + +	/* put dm9620 devices in dm9601 mode */ +	if (id == ID_DM9620) { +		u8 mode; + +		if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) { +			netdev_err(dev->net, "Error reading MODE_CTRL\n"); +			ret = -ENODEV; +			goto out; +		} +		dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f); +	} +  	/* power up phy */  	dm_write_reg(dev, DM_GPR_CTRL, 1);  	dm_write_reg(dev, DM_GPR_DATA, 0); @@ -581,6 +595,10 @@ static const struct usb_device_id products[] = {  	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */  	 .driver_info = (unsigned long)&dm9601_info,  	 }, +	{ +	 USB_DEVICE(0x0a46, 0x9620),	/* DM9620 USB to Fast Ethernet Adapter */ +	 .driver_info = (unsigned long)&dm9601_info, +	 },  	{},			// END  }; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6a1ca500e61..19d903598b0 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -351,6 +351,10 @@ static const struct usb_device_id products[] = 
{  		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	}, +	{	/* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	},  	/* 2. Combined interface devices matching on class+protocol */  	{	/* Huawei E367 and possibly others in "Windows mode" */ @@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {  		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),  		.driver_info        = (unsigned long)&qmi_wwan_info,  	}, +	{	/* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	}, +	{	/* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ +		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), +		.driver_info        = (unsigned long)&qmi_wwan_info, +	},  	{	/* Pantech UML290, P4200 and more */  		USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),  		.driver_info        = (unsigned long)&qmi_wwan_info, @@ -399,6 +411,7 @@ static const struct usb_device_id products[] = {  	},  	/* 3. Combined interface devices matching on interface number */ +	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */  	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */  	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},  	{QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, @@ -433,6 +446,7 @@ static const struct usb_device_id products[] = {  	{QMI_FIXED_INTF(0x19d2, 0x0199, 1)},	/* ZTE MF820S */  	{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},  	{QMI_FIXED_INTF(0x19d2, 0x0257, 3)},	/* ZTE MF821 */ +	{QMI_FIXED_INTF(0x19d2, 0x0265, 4)},	/* ONDA MT8205 4G LTE */  	{QMI_FIXED_INTF(0x19d2, 0x0284, 4)},	/* ZTE MF880 */  	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */  	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */ @@ -459,6 +473,8 @@ static const struct usb_device_id products[] = {  	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */  	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */  	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ +	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */ +	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */  	/* 4. 
Gobi 1000 devices */  	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3d4bf01641b..5e33606c136 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)  	unsigned long		lockflags;  	size_t			size = dev->rx_urb_size; +	/* prevent rx skb allocation when error ratio is high */ +	if (test_bit(EVENT_RX_KILL, &dev->flags)) { +		usb_free_urb(urb); +		return -ENOLINK; +	} +  	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);  	if (!skb) {  		netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); @@ -539,6 +545,17 @@ block:  		break;  	} +	/* stop rx if packet error rate is high */ +	if (++dev->pkt_cnt > 30) { +		dev->pkt_cnt = 0; +		dev->pkt_err = 0; +	} else { +		if (state == rx_cleanup) +			dev->pkt_err++; +		if (dev->pkt_err > 20) +			set_bit(EVENT_RX_KILL, &dev->flags); +	} +  	state = defer_bh(dev, skb, &dev->rxq, state);  	if (urb) { @@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)  		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :  		   "simple"); +	/* reset rx error state */ +	dev->pkt_cnt = 0; +	dev->pkt_err = 0; +	clear_bit(EVENT_RX_KILL, &dev->flags); +  	// delay posting reads until we're fully open  	tasklet_schedule (&dev->bh);  	if (info->manage_power) { @@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,  	if (info->tx_fixup) {  		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);  		if (!skb) { -			if (netif_msg_tx_err(dev)) { -				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); -				goto drop; -			} else { -				/* cdc_ncm collected packet; waits for more */ +			/* packet collected; minidriver waiting for more */ +			if (info->flags & FLAG_MULTI_PACKET)  				goto not_drop; -			} +			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); +			goto drop;  		}  	}  	length = skb->len; @@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)  		}  	} +	/* restart RX again after disabling due to high error rate */ +	clear_bit(EVENT_RX_KILL, &dev->flags); +  	// waiting for all pending urbs to complete?  	if (dev->wait) {  		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { @@ -1448,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)  		if ((dev->driver_info->flags & FLAG_WWAN) != 0)  			strcpy(net->name, "wwan%d"); +		/* devices that cannot do ARP */ +		if ((dev->driver_info->flags & FLAG_NOARP) != 0) +			net->flags |= IFF_NOARP; +  		/* maybe the remote can't receive an Ethernet MTU */  		if (net->mtu > (dev->hard_mtu - net->hard_header_len))  			net->mtu = dev->hard_mtu - net->hard_header_len; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a6fcf15adc4..35c00c5ea02 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -26,6 +26,7 @@  #include <linux/scatterlist.h>  #include <linux/if_vlan.h>  #include <linux/slab.h> +#include <linux/cpu.h>  static int napi_weight = 128;  module_param(napi_weight, int, 0444); @@ -123,6 +124,12 @@ struct virtnet_info {  	/* Does the affinity hint is set for virtqueues? 
*/  	bool affinity_hint_set; + +	/* Per-cpu variable to show the mapping from CPU to virtqueue */ +	int __percpu *vq_index; + +	/* CPU hot plug notifier */ +	struct notifier_block nb;  };  struct skb_vnet_hdr { @@ -1013,32 +1020,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)  	return 0;  } -static void virtnet_set_affinity(struct virtnet_info *vi, bool set) +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)  {  	int i; +	int cpu; + +	if (vi->affinity_hint_set) { +		for (i = 0; i < vi->max_queue_pairs; i++) { +			virtqueue_set_affinity(vi->rq[i].vq, -1); +			virtqueue_set_affinity(vi->sq[i].vq, -1); +		} + +		vi->affinity_hint_set = false; +	} + +	i = 0; +	for_each_online_cpu(cpu) { +		if (cpu == hcpu) { +			*per_cpu_ptr(vi->vq_index, cpu) = -1; +		} else { +			*per_cpu_ptr(vi->vq_index, cpu) = +				++i % vi->curr_queue_pairs; +		} +	} +} + +static void virtnet_set_affinity(struct virtnet_info *vi) +{ +	int i; +	int cpu;  	/* In multiqueue mode, when the number of cpu is equal to the number of  	 * queue pairs, we let the queue pairs to be private to one cpu by  	 * setting the affinity hint to eliminate the contention.  	 */ -	if ((vi->curr_queue_pairs == 1 || -	     vi->max_queue_pairs != num_online_cpus()) && set) { -		if (vi->affinity_hint_set) -			set = false; -		else -			return; +	if (vi->curr_queue_pairs == 1 || +	    vi->max_queue_pairs != num_online_cpus()) { +		virtnet_clean_affinity(vi, -1); +		return;  	} -	for (i = 0; i < vi->max_queue_pairs; i++) { -		int cpu = set ? i : -1; +	i = 0; +	for_each_online_cpu(cpu) {  		virtqueue_set_affinity(vi->rq[i].vq, cpu);  		virtqueue_set_affinity(vi->sq[i].vq, cpu); +		*per_cpu_ptr(vi->vq_index, cpu) = i; +		i++;  	} -	if (set) -		vi->affinity_hint_set = true; -	else -		vi->affinity_hint_set = false; +	vi->affinity_hint_set = true; +} + +static int virtnet_cpu_callback(struct notifier_block *nfb, +			        unsigned long action, void *hcpu) +{ +	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); + +	switch(action & ~CPU_TASKS_FROZEN) { +	case CPU_ONLINE: +	case CPU_DOWN_FAILED: +	case CPU_DEAD: +		virtnet_set_affinity(vi); +		break; +	case CPU_DOWN_PREPARE: +		virtnet_clean_affinity(vi, (long)hcpu); +		break; +	default: +		break; +	} +	return NOTIFY_OK;  }  static void virtnet_get_ringparam(struct net_device *dev, @@ -1082,13 +1132,15 @@ static int virtnet_set_channels(struct net_device *dev,  	if (queue_pairs > vi->max_queue_pairs)  		return -EINVAL; +	get_online_cpus();  	err = virtnet_set_queues(vi, queue_pairs);  	if (!err) {  		netif_set_real_num_tx_queues(dev, queue_pairs);  		netif_set_real_num_rx_queues(dev, queue_pairs); -		virtnet_set_affinity(vi, true); +		virtnet_set_affinity(vi);  	} +	put_online_cpus();  	return err;  } @@ -1127,12 +1179,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)  /* To avoid contending a lock hold by a vcpu who would exit to host, select the   * txq based on the processor id. - * TODO: handle cpu hotplug.   */  static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)  { -	int txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : -		  smp_processor_id(); +	int txq; +	struct virtnet_info *vi = netdev_priv(dev); + +	if (skb_rx_queue_recorded(skb)) { +		txq = skb_get_rx_queue(skb); +	} else { +		txq = *__this_cpu_ptr(vi->vq_index); +		if (txq == -1) +			txq = 0; +	}  	while (unlikely(txq >= dev->real_num_tx_queues))  		txq -= dev->real_num_tx_queues; @@ -1248,7 +1307,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)  {  	struct virtio_device *vdev = vi->vdev; -	virtnet_set_affinity(vi, false); +	virtnet_clean_affinity(vi, -1);  	vdev->config->del_vqs(vdev); @@ -1371,7 +1430,10 @@ static int init_vqs(struct virtnet_info *vi)  	if (ret)  		goto err_free; -	virtnet_set_affinity(vi, true); +	get_online_cpus(); +	virtnet_set_affinity(vi); +	put_online_cpus(); +  	return 0;  err_free: @@ -1453,6 +1515,10 @@ static int virtnet_probe(struct virtio_device *vdev)  	if (vi->stats == NULL)  		goto free; +	vi->vq_index = alloc_percpu(int); +	if (vi->vq_index == NULL) +		goto free_stats; +  	mutex_init(&vi->config_lock);  	vi->config_enable = true;  	INIT_WORK(&vi->config_work, virtnet_config_changed_work); @@ -1476,7 +1542,7 @@ static int virtnet_probe(struct virtio_device *vdev)  	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */  	err = init_vqs(vi);  	if (err) -		goto free_stats; +		goto free_index;  	netif_set_real_num_tx_queues(dev, 1);  	netif_set_real_num_rx_queues(dev, 1); @@ -1499,6 +1565,13 @@ static int virtnet_probe(struct virtio_device *vdev)  		}  	} +	vi->nb.notifier_call = &virtnet_cpu_callback; +	err = register_hotcpu_notifier(&vi->nb); +	if (err) { +		pr_debug("virtio_net: registering cpu notifier failed\n"); +		goto free_recv_bufs; +	} +  	/* Assume link up if device can't report link status,  	   otherwise get link status from config. */  	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { @@ -1520,6 +1593,8 @@ free_recv_bufs:  free_vqs:  	cancel_delayed_work_sync(&vi->refill);  	virtnet_del_vqs(vi); +free_index: +	free_percpu(vi->vq_index);  free_stats:  	free_percpu(vi->stats);  free: @@ -1543,6 +1618,8 @@ static void virtnet_remove(struct virtio_device *vdev)  {  	struct virtnet_info *vi = vdev->priv; +	unregister_hotcpu_notifier(&vi->nb); +  	/* Prevent config work handler from accessing the device. */  	mutex_lock(&vi->config_lock);  	vi->config_enable = false; @@ -1554,6 +1631,7 @@ static void virtnet_remove(struct virtio_device *vdev)  	flush_work(&vi->config_work); +	free_percpu(vi->vq_index);  	free_percpu(vi->stats);  	free_netdev(vi->dev);  } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index dc8913c6238..12c6440d164 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)  	if (ret & 1) { /* Link is up. 
*/  		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",  		       adapter->netdev->name, adapter->link_speed); -		if (!netif_carrier_ok(adapter->netdev)) -			netif_carrier_on(adapter->netdev); +		netif_carrier_on(adapter->netdev);  		if (affectTxQueue) {  			for (i = 0; i < adapter->num_tx_queues; i++) @@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)  	} else {  		printk(KERN_INFO "%s: NIC Link is Down\n",  		       adapter->netdev->name); -		if (netif_carrier_ok(adapter->netdev)) -			netif_carrier_off(adapter->netdev); +		netif_carrier_off(adapter->netdev);  		if (affectTxQueue) {  			for (i = 0; i < adapter->num_tx_queues; i++) @@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,  	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);  	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); +	netif_carrier_off(netdev);  	err = register_netdev(netdev);  	if (err) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 8b0d8dcd762..56317b0fb6b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -976,6 +976,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,  					  AR_PHY_CL_TAB_1,  					  AR_PHY_CL_TAB_2 }; +	ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); +  	if (rtt) {  		if (!ar9003_hw_rtt_restore(ah, chan))  			run_rtt_cal = true; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index ce19c09fa8e..3afc24bde6d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -586,32 +586,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,  	ath9k_hw_synth_delay(ah, chan, synthDelay);  } -static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) +void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)  { -	switch (rx) { -	case 0x5: +	if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)  		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,  			    AR_PHY_SWAP_ALT_CHAIN); -	case 0x3: -	case 0x1: -	case 0x2: -	case 0x7: -		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx); -		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx); -		break; -	default: -		break; -	} + +	REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx); +	REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);  	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) -		REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); -	else -		REG_WRITE(ah, AR_SELFGEN_MASK, tx); +		tx = 3; -	if (tx == 0x5) { -		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, -			    AR_PHY_SWAP_ALT_CHAIN); -	} +	REG_WRITE(ah, AR_SELFGEN_MASK, tx);  }  /* diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 86e26a19efd..42794c546a4 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -317,7 +317,6 @@ struct ath_rx {  	u32 *rxlink;  	u32 num_pkts;  	unsigned int rxfilter; -	spinlock_t rxbuflock;  	struct list_head rxbuf;  	struct ath_descdma rxdma;  	struct ath_buf *rx_bufptr; @@ -328,7 +327,6 @@ struct ath_rx {  int ath_startrecv(struct ath_softc *sc);  bool ath_stoprecv(struct ath_softc *sc); -void ath_flushrecv(struct ath_softc *sc);  u32 ath_calcrxfilter(struct ath_softc *sc);  int ath_rx_init(struct ath_softc *sc, int nbufs);  void ath_rx_cleanup(struct ath_softc *sc); @@ -646,7 +644,6 @@ void ath_ant_comb_update(struct ath_softc *sc);  enum sc_op_flags {  	SC_OP_INVALID,  	
SC_OP_BEACONS, -	SC_OP_RXFLUSH,  	SC_OP_ANI_RUN,  	SC_OP_PRIM_STA_VIF,  	SC_OP_HW_RESET, diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 531fffd801a..2ca355e94da 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,  				 skb->len, DMA_TO_DEVICE);  		dev_kfree_skb_any(skb);  		bf->bf_buf_addr = 0; +		bf->bf_mpdu = NULL;  	}  	skb = ieee80211_beacon_get(hw, vif); @@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)  		return;  	bf = ath9k_beacon_generate(sc->hw, vif); -	WARN_ON(!bf);  	if (sc->beacon.bmisscnt != 0) {  		ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n", diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 13ff9edc240..e585fc827c5 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -861,7 +861,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,  	RXS_ERR("RX-LENGTH-ERR", rx_len_err);  	RXS_ERR("RX-OOM-ERR", rx_oom_err);  	RXS_ERR("RX-RATE-ERR", rx_rate_err); -	RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);  	RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);  	PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 375c3b46411..6df2ab62dcb 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -216,7 +216,6 @@ struct ath_tx_stats {   * @rx_oom_err:  No. of frames dropped due to OOM issues.   * @rx_rate_err:  No. of frames dropped due to rate errors.   * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received. - * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.   * @rx_beacons:  No. of beacons received.   * @rx_frags:  No. of rx-fragements received.   
*/ @@ -235,7 +234,6 @@ struct ath_rx_stats {  	u32 rx_oom_err;  	u32 rx_rate_err;  	u32 rx_too_many_frags_err; -	u32 rx_drop_rxflush;  	u32 rx_beacons;  	u32 rx_frags;  }; diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 4a9570dfba7..aac4a406a51 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,  			endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,  						  skb, htc_hdr->endpoint_id,  						  txok); +		} else { +			kfree_skb(skb);  		}  	} diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 7f1a8e91c90..9d26fc56ca5 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1066,6 +1066,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);  int ar9003_paprd_init_table(struct ath_hw *ah);  bool ar9003_paprd_is_done(struct ath_hw *ah);  bool ar9003_is_paprd_enabled(struct ath_hw *ah); +void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);  /* Hardware family op attach helpers */  void ar5008_hw_attach_phy_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index be30a9af152..dd91f8fdc01 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)  	ath_start_ani(sc);  } -static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) +static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx)  {  	struct ath_hw *ah = sc->sc_ah;  	bool ret = true; @@ -202,14 +202,6 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)  	if (!ath_drain_all_txq(sc, retry_tx))  		ret = false; -	if (!flush) { -		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) -			ath_rx_tasklet(sc, 1, true); -		ath_rx_tasklet(sc, 1, false); -	} else { -		ath_flushrecv(sc); -	} -  	return ret;  } @@ -262,11 +254,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,  	struct ath_common *common = ath9k_hw_common(ah);  	struct ath9k_hw_cal_data *caldata = NULL;  	bool fastcc = true; -	bool flush = false;  	int r;  	__ath_cancel_work(sc); +	tasklet_disable(&sc->intr_tq);  	spin_lock_bh(&sc->sc_pcu_lock);  	if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) { @@ -276,11 +268,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,  	if (!hchan) {  		fastcc = false; -		flush = true;  		hchan = ah->curchan;  	} -	if (!ath_prepare_reset(sc, retry_tx, flush)) +	if (!ath_prepare_reset(sc, retry_tx))  		fastcc = false;  	ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", @@ -302,6 +293,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,  out:  	spin_unlock_bh(&sc->sc_pcu_lock); +	tasklet_enable(&sc->intr_tq); +  	return r;  } @@ -804,7 +797,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)  		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);  	} -	ath_prepare_reset(sc, false, true); +	ath_prepare_reset(sc, false);  	if (sc->rx.frag) {  		dev_kfree_skb_any(sc->rx.frag); @@ -1833,6 +1826,9 @@ static u32 fill_chainmask(u32 cap, u32 new)  static bool validate_antenna_mask(struct ath_hw *ah, u32 val)  { +	if (AR_SREV_9300_20_OR_LATER(ah)) +		return true; +  	switch (val & 0x7) {  	case 0x1:  	case 0x3: diff --git 
a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index d4df98a938b..90752f24697 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -254,8 +254,6 @@ rx_init_fail:  static void ath_edma_start_recv(struct ath_softc *sc)  { -	spin_lock_bh(&sc->rx.rxbuflock); -  	ath9k_hw_rxena(sc->sc_ah);  	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, @@ -267,8 +265,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)  	ath_opmode_init(sc);  	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); - -	spin_unlock_bh(&sc->rx.rxbuflock);  }  static void ath_edma_stop_recv(struct ath_softc *sc) @@ -285,8 +281,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)  	int error = 0;  	spin_lock_init(&sc->sc_pcu_lock); -	spin_lock_init(&sc->rx.rxbuflock); -	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);  	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +  			     sc->sc_ah->caps.rx_status_len; @@ -447,7 +441,6 @@ int ath_startrecv(struct ath_softc *sc)  		return 0;  	} -	spin_lock_bh(&sc->rx.rxbuflock);  	if (list_empty(&sc->rx.rxbuf))  		goto start_recv; @@ -468,26 +461,31 @@ start_recv:  	ath_opmode_init(sc);  	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); -	spin_unlock_bh(&sc->rx.rxbuflock); -  	return 0;  } +static void ath_flushrecv(struct ath_softc *sc) +{ +	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) +		ath_rx_tasklet(sc, 1, true); +	ath_rx_tasklet(sc, 1, false); +} +  bool ath_stoprecv(struct ath_softc *sc)  {  	struct ath_hw *ah = sc->sc_ah;  	bool stopped, reset = false; -	spin_lock_bh(&sc->rx.rxbuflock);  	ath9k_hw_abortpcurecv(ah);  	ath9k_hw_setrxfilter(ah, 0);  	stopped = ath9k_hw_stopdmarecv(ah, &reset); +	ath_flushrecv(sc); +  	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)  		ath_edma_stop_recv(sc);  	else  		sc->rx.rxlink = NULL; -	spin_unlock_bh(&sc->rx.rxbuflock);  	if (!(ah->ah_flags & AH_UNPLUGGED) &&  	    unlikely(!stopped)) { @@ -499,15 +497,6 @@ bool ath_stoprecv(struct ath_softc *sc)  	return stopped && !reset;  } -void ath_flushrecv(struct ath_softc *sc) -{ -	set_bit(SC_OP_RXFLUSH, &sc->sc_flags); -	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) -		ath_rx_tasklet(sc, 1, true); -	ath_rx_tasklet(sc, 1, false); -	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags); -} -  static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)  {  	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ @@ -744,6 +733,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,  			return NULL;  	} +	list_del(&bf->list);  	if (!bf->bf_mpdu)  		return bf; @@ -1059,16 +1049,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)  		dma_type = DMA_FROM_DEVICE;  	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; -	spin_lock_bh(&sc->rx.rxbuflock);  	tsf = ath9k_hw_gettsf64(ah);  	tsf_lower = tsf & 0xffffffff;  	do {  		bool decrypt_error = false; -		/* If handling rx interrupt and flush is in progress => exit */ -		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) -			break;  		memset(&rs, 0, sizeof(rs));  		if (edma) @@ -1111,15 +1097,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)  		ath_debug_stat_rx(sc, &rs); -		/* -		 * If we're asked to flush receive queue, directly -		 * chain it back at the queue without processing it. 
-		 */ -		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) { -			RX_STAT_INC(rx_drop_rxflush); -			goto requeue_drop_frag; -		} -  		memset(rxs, 0, sizeof(struct ieee80211_rx_status));  		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; @@ -1254,19 +1231,18 @@ requeue_drop_frag:  			sc->rx.frag = NULL;  		}  requeue: +		list_add_tail(&bf->list, &sc->rx.rxbuf); +		if (flush) +			continue; +  		if (edma) { -			list_add_tail(&bf->list, &sc->rx.rxbuf);  			ath_rx_edma_buf_link(sc, qtype);  		} else { -			list_move_tail(&bf->list, &sc->rx.rxbuf);  			ath_rx_buf_link(sc, bf); -			if (!flush) -				ath9k_hw_rxena(ah); +			ath9k_hw_rxena(ah);  		}  	} while (1); -	spin_unlock_bh(&sc->rx.rxbuflock); -  	if (!(ah->imask & ATH9K_INT_RXEOL)) {  		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);  		ath9k_hw_set_interrupts(ah); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 1fbd8ecbe2e..e5fd20994be 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -36,6 +36,7 @@  #include "debug.h"  #define N_TX_QUEUES	4 /* #tx queues on mac80211<->driver interface */ +#define BRCMS_FLUSH_TIMEOUT	500 /* msec */  /* Flags we support */  #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ @@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)  	wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);  } +static bool brcms_tx_flush_completed(struct brcms_info *wl) +{ +	bool result; + +	spin_lock_bh(&wl->lock); +	result = brcms_c_tx_flush_completed(wl->wlc); +	spin_unlock_bh(&wl->lock); +	return result; +} +  static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)  {  	struct brcms_info *wl = hw->priv; +	int ret;  	no_printk("%s: drop = %s\n", __func__, drop ? 
"true" : "false"); -	/* wait for packet queue and dma fifos to run empty */ -	spin_lock_bh(&wl->lock); -	brcms_c_wait_for_tx_completion(wl->wlc, drop); -	spin_unlock_bh(&wl->lock); +	ret = wait_event_timeout(wl->tx_flush_wq, +				 brcms_tx_flush_completed(wl), +				 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); + +	brcms_dbg_mac80211(wl->wlc->hw->d11core, +			   "ret=%d\n", jiffies_to_msecs(ret));  }  static const struct ieee80211_ops brcms_ops = { @@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)   done:  	spin_unlock_bh(&wl->lock); +	wake_up(&wl->tx_flush_wq);  }  /* @@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)  	atomic_set(&wl->callbacks, 0); +	init_waitqueue_head(&wl->tx_flush_wq); +  	/* setup the bottom half handler */  	tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); @@ -1407,9 +1424,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)  #endif  	t->ms = ms;  	t->periodic = (bool) periodic; -	t->set = true; - -	atomic_inc(&t->wl->callbacks); +	if (!t->set) { +		t->set = true; +		atomic_inc(&t->wl->callbacks); +	}  	ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));  } @@ -1608,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)  	spin_lock_bh(&wl->lock);  	return blocked;  } - -/* - * precondition: perimeter lock has been acquired - */ -void brcms_msleep(struct brcms_info *wl, uint ms) -{ -	spin_unlock_bh(&wl->lock); -	msleep(ms); -	spin_lock_bh(&wl->lock); -} diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 9358bd5ebd3..947ccacf43e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h @@ -68,6 +68,8 @@ struct brcms_info {  	spinlock_t lock;	/* per-device perimeter lock */  	spinlock_t isr_lock;	/* per-device ISR synchronization lock */ +	/* tx flush */ +	wait_queue_head_t tx_flush_wq;  	/* timer related fields */  	atomic_t callbacks;	/* # outstanding callback functions */ @@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,  extern void brcms_free_timer(struct brcms_timer *timer);  extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);  extern bool brcms_del_timer(struct brcms_timer *timer); -extern void brcms_msleep(struct brcms_info *wl, uint ms);  extern void brcms_dpc(unsigned long data);  extern void brcms_timer(struct brcms_timer *t);  extern void brcms_fatal_error(struct brcms_info *wl); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 17594de4199..8b5839008af 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)  static bool  brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  { -	bool morepending = false;  	struct bcma_device *core;  	struct tx_status txstatus, *txs;  	u32 s1, s2; @@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  	txs = &txstatus;  	core = wlc_hw->d11core;  	*fatal = false; -	s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); -	while (!(*fatal) -	       && (s1 & TXS_V)) { -		/* !give others some time to run! 
*/ -		if (n >= max_tx_num) { -			morepending = true; -			break; -		} +	while (n < max_tx_num) { +		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));  		if (s1 == 0xffffffff) {  			brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,  				  __func__);  			*fatal = true;  			return false;  		} -		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); +		/* only process when valid */ +		if (!(s1 & TXS_V)) +			break; +		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));  		txs->status = s1 & TXS_STATUS_MASK;  		txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;  		txs->sequence = s2 & TXS_SEQ_MASK; @@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)  		txs->lasttxtime = 0;  		*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); - -		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); +		if (*fatal == true) +			return false;  		n++;  	} -	if (*fatal) -		return false; - -	return morepending; +	return n >= max_tx_num;  }  static void brcms_c_tbtt(struct brcms_c_info *wlc) @@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)  	return wlc->band->bandunit;  } -void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) +bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)  { -	int timeout = 20;  	int i;  	/* Kick DMA to send any pending AMPDU */  	for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)  		if (wlc->hw->di[i]) -			dma_txflush(wlc->hw->di[i]); - -	/* wait for queue and DMA fifos to run dry */ -	while (brcms_txpktpendtot(wlc) > 0) { -		brcms_msleep(wlc->wl, 1); - -		if (--timeout == 0) -			break; -	} +			dma_kick_tx(wlc->hw->di[i]); -	WARN_ON_ONCE(timeout == 0); +	return !brcms_txpktpendtot(wlc);  }  void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h index 4fb2834f4e6..b0f14b7b861 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h @@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);  extern void brcms_c_scan_start(struct brcms_c_info *wlc);  extern void brcms_c_scan_stop(struct brcms_c_info *wlc);  extern int brcms_c_get_curband(struct brcms_c_info *wlc); -extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, -					   bool drop);  extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);  extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);  extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, @@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);  extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);  extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);  extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); +extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);  #endif				/* _BRCM_PUB_H_ */ diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 7e16d10a7f1..90b8970eadf 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -3958,17 +3958,21 @@ il_connection_init_rx_config(struct il_priv *il)  	memset(&il->staging, 0, sizeof(il->staging)); -	if (!il->vif) { +	switch (il->iw_mode) { +	case NL80211_IFTYPE_UNSPECIFIED:  		il->staging.dev_type = RXON_DEV_TYPE_ESS; -	} else if (il->vif->type == NL80211_IFTYPE_STATION) { +		break; +	case NL80211_IFTYPE_STATION:  		
il->staging.dev_type = RXON_DEV_TYPE_ESS;  		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; -	} else if (il->vif->type == NL80211_IFTYPE_ADHOC) { +		break; +	case NL80211_IFTYPE_ADHOC:  		il->staging.dev_type = RXON_DEV_TYPE_IBSS;  		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;  		il->staging.filter_flags =  		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; -	} else { +		break; +	default:  		IL_ERR("Unsupported interface type %d\n", il->vif->type);  		return;  	} @@ -4550,8 +4554,7 @@ out:  EXPORT_SYMBOL(il_mac_add_interface);  static void -il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, -		      bool mode_change) +il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)  {  	lockdep_assert_held(&il->mutex); @@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,  		il_force_scan_end(il);  	} -	if (!mode_change) -		il_set_mode(il); - +	il_set_mode(il);  }  void @@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)  	WARN_ON(il->vif != vif);  	il->vif = NULL; - -	il_teardown_interface(il, vif, false); +	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; +	il_teardown_interface(il, vif);  	memset(il->bssid, 0, ETH_ALEN);  	D_MAC80211("leave\n"); @@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,  	}  	/* success */ -	il_teardown_interface(il, vif, true);  	vif->type = newtype;  	vif->p2p = false; -	err = il_set_mode(il); -	WARN_ON(err); -	/* -	 * We've switched internally, but submitting to the -	 * device may have failed for some reason. Mask this -	 * error, because otherwise mac80211 will not switch -	 * (and set the interface type back) and we'll be -	 * out of sync with it. -	 */ +	il->iw_mode = newtype; +	il_teardown_interface(il, vif);  	err = 0;  out: diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a790599fe2c..279796419ea 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1079,6 +1079,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,  {  	u16 status = le16_to_cpu(tx_resp->status.status); +	info->flags &= ~IEEE80211_TX_CTL_AMPDU; +  	info->status.rates[0].count = tx_resp->failure_frame + 1;  	info->flags |= iwl_tx_status_to_mac80211(status);  	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), @@ -1151,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,  			next_reclaimed = ssn;  		} +		if (tid != IWL_TID_NON_QOS) { +			priv->tid_data[sta_id][tid].next_reclaimed = +				next_reclaimed; +			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", +						  next_reclaimed); +		} +  		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);  		iwlagn_check_ratid_empty(priv, sta_id, tid); @@ -1201,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,  			if (!is_agg)  				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); -			/* -			 * W/A for FW bug - the seq_ctl isn't updated when the -			 * queues are flushed. 
Fetch it from the packet itself -			 */ -			if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { -				next_reclaimed = le16_to_cpu(hdr->seq_ctrl); -				next_reclaimed = -					SEQ_TO_SN(next_reclaimed + 0x10); -			} -  			is_offchannel_skb =  				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);  			freed++;  		} -		if (tid != IWL_TID_NON_QOS) { -			priv->tid_data[sta_id][tid].next_reclaimed = -				next_reclaimed; -			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", -					   next_reclaimed); -		} -  		WARN_ON(!is_agg && freed != 1);  		/* diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index efe525be27d..cdb11b3964e 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1459,7 +1459,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,  	struct cfg80211_ssid req_ssid;  	int ret, auth_type = 0;  	struct cfg80211_bss *bss = NULL; -	u8 is_scanning_required = 0, config_bands = 0; +	u8 is_scanning_required = 0;  	memset(&req_ssid, 0, sizeof(struct cfg80211_ssid)); @@ -1478,19 +1478,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,  	/* disconnect before try to associate */  	mwifiex_deauthenticate(priv, NULL); -	if (channel) { -		if (mode == NL80211_IFTYPE_STATION) { -			if (channel->band == IEEE80211_BAND_2GHZ) -				config_bands = BAND_B | BAND_G | BAND_GN; -			else -				config_bands = BAND_A | BAND_AN; - -			if (!((config_bands | priv->adapter->fw_bands) & -			      ~priv->adapter->fw_bands)) -				priv->adapter->config_bands = config_bands; -		} -	} -  	/* As this is new association, clear locally stored  	 * keys and security related flags */  	priv->sec_info.wpa_enabled = false; @@ -1707,7 +1694,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,  		if (cfg80211_get_chandef_type(¶ms->chandef) !=  						NL80211_CHAN_NO_HT) -			config_bands |= BAND_GN; +			config_bands |= BAND_G | BAND_GN;  	} else {  		if (cfg80211_get_chandef_type(¶ms->chandef) ==  						NL80211_CHAN_NO_HT) diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 13fbc4eb159..b879e1338a5 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -161,7 +161,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)  	if (pdev) {  		card = (struct pcie_service_card *) pci_get_drvdata(pdev); -		if (!card || card->adapter) { +		if (!card || !card->adapter) {  			pr_err("Card or adapter structure is not valid\n");  			return 0;  		} diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9189a32b784..973a9d90e9e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",  			scan_rsp->number_of_sets);  		ret = -1; -		goto done; +		goto check_next_scan;  	}  	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); @@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		if (!beacon_size || beacon_size > bytes_left) {  			bss_info += bytes_left;  			bytes_left = 0; -			return -1; +			ret = -1; +			goto check_next_scan;  		}  		/* Initialize the current working beacon pointer for this BSS @@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  				dev_err(priv->adapter->dev,  					"%s: bytes left < IE 
length\n",  					__func__); -				goto done; +				goto check_next_scan;  			}  			if (element_id == WLAN_EID_DS_PARAMS) {  				channel = *(current_ptr + sizeof(struct ieee_types_header)); @@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		}  	} +check_next_scan:  	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);  	if (list_empty(&adapter->scan_pending_q)) {  		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,  		}  	} -done:  	return ret;  } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 60e88b58039..f542bb8ccbc 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -283,6 +283,20 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,  		if (ret)  			goto done; +		if (bss_desc) { +			u8 config_bands = 0; + +			if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) +			    == HostCmd_SCAN_RADIO_TYPE_BG) +				config_bands = BAND_B | BAND_G | BAND_GN; +			else +				config_bands = BAND_A | BAND_AN; + +			if (!((config_bands | adapter->fw_bands) & +			      ~adapter->fw_bands)) +				adapter->config_bands = config_bands; +		} +  		ret = mwifiex_check_network_compatibility(priv, bss_desc);  		if (ret)  			goto done; diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 83564d36e80..a00a03ea4ec 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -318,20 +318,20 @@ struct mwl8k_sta {  #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))  static const struct ieee80211_channel mwl8k_channels_24[] = { -	{ .center_freq = 2412, .hw_value = 1, }, -	{ .center_freq = 2417, .hw_value = 2, }, -	{ .center_freq = 2422, .hw_value = 3, }, -	{ .center_freq = 2427, .hw_value = 4, }, -	{ .center_freq = 2432, .hw_value = 5, }, -	{ .center_freq = 2437, .hw_value = 6, }, -	{ .center_freq = 2442, .hw_value = 7, }, -	{ .center_freq = 2447, .hw_value = 8, }, -	{ .center_freq = 2452, .hw_value = 9, }, -	{ .center_freq = 2457, .hw_value = 10, }, -	{ .center_freq = 2462, .hw_value = 11, }, -	{ .center_freq = 2467, .hw_value = 12, }, -	{ .center_freq = 2472, .hw_value = 13, }, -	{ .center_freq = 2484, .hw_value = 14, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, }, +	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },  };  static const struct ieee80211_rate mwl8k_rates_24[] = { @@ -352,10 +352,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {  };  static const struct ieee80211_channel 
mwl8k_channels_50[] = { -	{ .center_freq = 5180, .hw_value = 36, }, -	{ .center_freq = 5200, .hw_value = 40, }, -	{ .center_freq = 5220, .hw_value = 44, }, -	{ .center_freq = 5240, .hw_value = 48, }, +	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, +	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, +	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, +	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },  };  static const struct ieee80211_rate mwl8k_rates_50[] = { diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index 21b1bbb93a7..b80bc461258 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -57,12 +57,12 @@ config RTL8192CU  config RTLWIFI  	tristate -	depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE +	depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE  	default m  config RTLWIFI_DEBUG  	bool "Additional debugging output" -	depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE +	depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE  	default y  config RTL8192C_COMMON diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 4494d130b37..0f8b05185ed 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  					 is_tx ? "Tx" : "Rx");  				if (is_tx) { -					rtl_lps_leave(hw); +					schedule_work(&rtlpriv-> +						      works.lps_leave_work);  					ppsc->last_delaylps_stamp_jiffies =  					    jiffies;  				} @@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  		}  	} else if (ETH_P_ARP == ether_type) {  		if (is_tx) { -			rtl_lps_leave(hw); +			schedule_work(&rtlpriv->works.lps_leave_work);  			ppsc->last_delaylps_stamp_jiffies = jiffies;  		} @@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)  			 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");  		if (is_tx) { -			rtl_lps_leave(hw); +			schedule_work(&rtlpriv->works.lps_leave_work);  			ppsc->last_delaylps_stamp_jiffies = jiffies;  		} diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f2ecdeb3a90..1535efda3d5 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)  	WARN_ON(skb_queue_empty(&rx_queue));  	while (!skb_queue_empty(&rx_queue)) {  		_skb = skb_dequeue(&rx_queue); -		_rtl_usb_rx_process_agg(hw, skb); -		ieee80211_rx_irqsafe(hw, skb); +		_rtl_usb_rx_process_agg(hw, _skb); +		ieee80211_rx_irqsafe(hw, _skb);  	}  } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 94b79c3338c..9d7f1723dd8 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);  /* Notify xenvif that ring now has space to send an skb to the frontend */  void xenvif_notify_tx_completion(struct xenvif *vif); +/* Prevent the device from generating any further traffic. 
*/ +void xenvif_carrier_off(struct xenvif *vif); +  /* Returns number of ring slots required to send an skb to the frontend */  unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b7d41f8c338..b8c5193bd42 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -343,17 +343,22 @@ err:  	return err;  } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_carrier_off(struct xenvif *vif)  {  	struct net_device *dev = vif->dev; -	if (netif_carrier_ok(dev)) { -		rtnl_lock(); -		netif_carrier_off(dev); /* discard queued packets */ -		if (netif_running(dev)) -			xenvif_down(vif); -		rtnl_unlock(); -		xenvif_put(vif); -	} + +	rtnl_lock(); +	netif_carrier_off(dev); /* discard queued packets */ +	if (netif_running(dev)) +		xenvif_down(vif); +	rtnl_unlock(); +	xenvif_put(vif); +} + +void xenvif_disconnect(struct xenvif *vif) +{ +	if (netif_carrier_ok(vif->dev)) +		xenvif_carrier_off(vif);  	atomic_dec(&vif->refcnt);  	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f2d6b78d901..2b9520c46e9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)  	atomic_dec(&netbk->netfront_count);  } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +				  u8 status);  static void make_tx_response(struct xenvif *vif,  			     struct xen_netif_tx_request *txp,  			     s8       st); @@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,  	do {  		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		if (cons >= end) +		if (cons == end)  			break;  		txp = RING_GET_REQUEST(&vif->tx, cons++);  	} while (1); @@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,  	xenvif_put(vif);  } +static void netbk_fatal_tx_err(struct xenvif *vif) +{ +	netdev_err(vif->dev, "fatal error; disabling device\n"); +	xenvif_carrier_off(vif); +	xenvif_put(vif); +} +  static int netbk_count_requests(struct xenvif *vif,  				struct xen_netif_tx_request *first,  				struct xen_netif_tx_request *txp, @@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,  	do {  		if (frags >= work_to_do) { -			netdev_dbg(vif->dev, "Need more frags\n"); +			netdev_err(vif->dev, "Need more frags\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  		if (unlikely(frags >= MAX_SKB_FRAGS)) { -			netdev_dbg(vif->dev, "Too many frags\n"); +			netdev_err(vif->dev, "Too many frags\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),  		       sizeof(*txp));  		if (txp->size > first->size) { -			netdev_dbg(vif->dev, "Frags galore\n"); +			netdev_err(vif->dev, "Frag is bigger than frame.\n"); +			netbk_fatal_tx_err(vif);  			return -frags;  		} @@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,  		frags++;  		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { -			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", +			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",  				 txp->offset, txp->size); +			netbk_fatal_tx_err(vif);  			return -frags;  		}  	} while ((txp++)->flags & XEN_NETTXF_more_data); @@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct 
xen_netbk *netbk,  		pending_idx = netbk->pending_ring[index];  		page = xen_netbk_alloc_page(netbk, skb, pending_idx);  		if (!page) -			return NULL; +			goto err;  		gop->source.u.ref = txp->gref;  		gop->source.domid = vif->domid; @@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,  	}  	return gop; +err: +	/* Unwind, freeing all pages and sending error responses. */ +	while (i-- > start) { +		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), +				      XEN_NETIF_RSP_ERROR); +	} +	/* The head too, if necessary. */ +	if (start) +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + +	return NULL;  }  static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, @@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  {  	struct gnttab_copy *gop = *gopp;  	u16 pending_idx = *((u16 *)skb->data); -	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; -	struct xenvif *vif = pending_tx_info[pending_idx].vif; -	struct xen_netif_tx_request *txp;  	struct skb_shared_info *shinfo = skb_shinfo(skb);  	int nr_frags = shinfo->nr_frags;  	int i, err, start;  	/* Check status of header. */  	err = gop->status; -	if (unlikely(err)) { -		pending_ring_idx_t index; -		index = pending_index(netbk->pending_prod++); -		txp = &pending_tx_info[pending_idx].req; -		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		netbk->pending_ring[index] = pending_idx; -		xenvif_put(vif); -	} +	if (unlikely(err)) +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);  	/* Skip first skb fragment if it is on same page as header fragment. */  	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);  	for (i = start; i < nr_frags; i++) {  		int j, newerr; -		pending_ring_idx_t index;  		pending_idx = frag_get_pending_idx(&shinfo->frags[i]); @@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  		if (likely(!newerr)) {  			/* Had a previous error? Invalidate this fragment. */  			if (unlikely(err)) -				xen_netbk_idx_release(netbk, pending_idx); +				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  			continue;  		}  		/* Error on this fragment: respond to client with an error. */ -		txp = &netbk->pending_tx_info[pending_idx].req; -		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); -		index = pending_index(netbk->pending_prod++); -		netbk->pending_ring[index] = pending_idx; -		xenvif_put(vif); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);  		/* Not the first error? Preceding frags already invalidated. */  		if (err) @@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,  		/* First error: invalidate header and preceding fragments. */  		pending_idx = *((u16 *)skb->data); -		xen_netbk_idx_release(netbk, pending_idx); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		for (j = start; j < i; j++) {  			pending_idx = frag_get_pending_idx(&shinfo->frags[j]); -			xen_netbk_idx_release(netbk, pending_idx); +			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		}  		/* Remember the error: invalidate all subsequent fragments. 
*/ @@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)  		/* Take an extra reference to offset xen_netbk_idx_release */  		get_page(netbk->mmap_pages[pending_idx]); -		xen_netbk_idx_release(netbk, pending_idx); +		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  	}  } @@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,  	do {  		if (unlikely(work_to_do-- <= 0)) { -			netdev_dbg(vif->dev, "Missing extra info\n"); +			netdev_err(vif->dev, "Missing extra info\n"); +			netbk_fatal_tx_err(vif);  			return -EBADR;  		} @@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,  		if (unlikely(!extra.type ||  			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {  			vif->tx.req_cons = ++cons; -			netdev_dbg(vif->dev, +			netdev_err(vif->dev,  				   "Invalid extra type: %d\n", extra.type); +			netbk_fatal_tx_err(vif);  			return -EINVAL;  		} @@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,  			     struct xen_netif_extra_info *gso)  {  	if (!gso->u.gso.size) { -		netdev_dbg(vif->dev, "GSO size must not be zero.\n"); +		netdev_err(vif->dev, "GSO size must not be zero.\n"); +		netbk_fatal_tx_err(vif);  		return -EINVAL;  	}  	/* Currently only TCPv4 S.O. is supported. */  	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { -		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); +		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); +		netbk_fatal_tx_err(vif);  		return -EINVAL;  	} @@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  		/* Get a netif from the list with work to do. */  		vif = poll_net_schedule_list(netbk); +		/* This can sometimes happen because the test of +		 * list_empty(net_schedule_list) at the top of the +		 * loop is unlocked.  Just go back and have another +		 * look. +		 */  		if (!vif)  			continue; +		if (vif->tx.sring->req_prod - vif->tx.req_cons > +		    XEN_NETIF_TX_RING_SIZE) { +			netdev_err(vif->dev, +				   "Impossible number of requests. " +				   "req_prod %d, req_cons %d, size %ld\n", +				   vif->tx.sring->req_prod, vif->tx.req_cons, +				   XEN_NETIF_TX_RING_SIZE); +			netbk_fatal_tx_err(vif); +			continue; +		} +  		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);  		if (!work_to_do) {  			xenvif_put(vif); @@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  			work_to_do = xen_netbk_get_extras(vif, extras,  							  work_to_do);  			idx = vif->tx.req_cons; -			if (unlikely(work_to_do < 0)) { -				netbk_tx_err(vif, &txreq, idx); +			if (unlikely(work_to_do < 0))  				continue; -			}  		}  		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); -		if (unlikely(ret < 0)) { -			netbk_tx_err(vif, &txreq, idx - ret); +		if (unlikely(ret < 0))  			continue; -		} +  		idx += ret;  		if (unlikely(txreq.size < ETH_HLEN)) { @@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  		/* No crossing a page as the payload mustn't fragment. 
*/  		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { -			netdev_dbg(vif->dev, +			netdev_err(vif->dev,  				   "txreq.offset: %x, size: %u, end: %lu\n",  				   txreq.offset, txreq.size,  				   (txreq.offset&~PAGE_MASK) + txreq.size); -			netbk_tx_err(vif, &txreq, idx); +			netbk_fatal_tx_err(vif);  			continue;  		} @@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)  			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];  			if (netbk_set_skb_gso(vif, skb, gso)) { +				/* Failure in netbk_set_skb_gso is fatal. */  				kfree_skb(skb); -				netbk_tx_err(vif, &txreq, idx);  				continue;  			}  		} @@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)  			txp->size -= data_len;  		} else {  			/* Schedule a response immediately. */ -			xen_netbk_idx_release(netbk, pending_idx); +			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);  		}  		if (txp->flags & XEN_NETTXF_csum_blank) @@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)  	xen_netbk_tx_submit(netbk);  } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +				  u8 status)  {  	struct xenvif *vif;  	struct pending_tx_info *pending_tx_info; @@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)  	vif = pending_tx_info->vif; -	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); +	make_tx_response(vif, &pending_tx_info->req, status);  	index = pending_index(netbk->pending_prod++);  	netbk->pending_ring[index] = pending_idx; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 26ffd3e3fb7..2c113de9432 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;  extern int pciehp_poll_time;  extern bool pciehp_debug;  extern bool pciehp_force; -extern struct workqueue_struct *pciehp_wq;  #define dbg(format, arg...)						
\  do {									\ @@ -78,6 +77,7 @@ struct slot {  	struct hotplug_slot *hotplug_slot;  	struct delayed_work work;	/* work for button event */  	struct mutex lock; +	struct workqueue_struct *wq;  };  struct event_info { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 916bf4f53ab..939bd1d4b5b 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -42,7 +42,6 @@ bool pciehp_debug;  bool pciehp_poll_mode;  int pciehp_poll_time;  bool pciehp_force; -struct workqueue_struct *pciehp_wq;  #define DRIVER_VERSION	"0.4"  #define DRIVER_AUTHOR	"Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -340,18 +339,13 @@ static int __init pcied_init(void)  {  	int retval = 0; -	pciehp_wq = alloc_workqueue("pciehp", 0, 0); -	if (!pciehp_wq) -		return -ENOMEM; -  	pciehp_firmware_init();  	retval = pcie_port_service_register(&hpdriver_portdrv);   	dbg("pcie_port_service_register = %d\n", retval);    	info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - 	if (retval) { -		destroy_workqueue(pciehp_wq); +	if (retval)  		dbg("Failure to register service\n"); -	} +  	return retval;  } @@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)  {  	dbg("unload_pciehpd()\n");  	pcie_port_service_unregister(&hpdriver_portdrv); -	destroy_workqueue(pciehp_wq);  	info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");  } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 27f44295a65..38f01867917 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)  	info->p_slot = p_slot;  	INIT_WORK(&info->work, interrupt_event_handler); -	queue_work(pciehp_wq, &info->work); +	queue_work(p_slot->wq, &info->work);  	return 0;  } @@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)  		kfree(info);  		goto out;  	} -	queue_work(pciehp_wq, &info->work); +	queue_work(p_slot->wq, &info->work);   out:  	mutex_unlock(&p_slot->lock);  } @@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)  		if (ATTN_LED(ctrl))  			pciehp_set_attention_status(p_slot, 0); -		queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); +		queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);  		break;  	case BLINKINGOFF_STATE:  	case BLINKINGON_STATE: @@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)  	else  		p_slot->state = POWERON_STATE; -	queue_work(pciehp_wq, &info->work); +	queue_work(p_slot->wq, &info->work);  }  static void interrupt_event_handler(struct work_struct *work) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 13b2eaf7ba4..5127f3f4182 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)  static int pcie_init_slot(struct controller *ctrl)  {  	struct slot *slot; +	char name[32];  	slot = kzalloc(sizeof(*slot), GFP_KERNEL);  	if (!slot)  		return -ENOMEM; +	snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); +	slot->wq = alloc_workqueue(name, 0, 0); +	if (!slot->wq) +		goto abort; +  	slot->ctrl = ctrl;  	mutex_init(&slot->lock);  	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);  	ctrl->slot = slot;  	return 0; +abort: +	kfree(slot); +	return -ENOMEM;  }  static void pcie_cleanup_slot(struct controller *ctrl)  {  	struct 
slot *slot = ctrl->slot;  	cancel_delayed_work(&slot->work); -	flush_workqueue(pciehp_wq); +	destroy_workqueue(slot->wq);  	kfree(slot);  } diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ca64932e658..b849f995075 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -46,8 +46,6 @@  extern bool shpchp_poll_mode;  extern int shpchp_poll_time;  extern bool shpchp_debug; -extern struct workqueue_struct *shpchp_wq; -extern struct workqueue_struct *shpchp_ordered_wq;  #define dbg(format, arg...)						\  do {									\ @@ -91,6 +89,7 @@ struct slot {  	struct list_head	slot_list;  	struct delayed_work work;	/* work for button event */  	struct mutex lock; +	struct workqueue_struct *wq;  	u8 hp_slot;  }; diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index b6de307248e..3100c52c837 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -39,8 +39,6 @@  bool shpchp_debug;  bool shpchp_poll_mode;  int shpchp_poll_time; -struct workqueue_struct *shpchp_wq; -struct workqueue_struct *shpchp_ordered_wq;  #define DRIVER_VERSION	"0.4"  #define DRIVER_AUTHOR	"Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl)  		slot->device = ctrl->slot_device_offset + i;  		slot->hpc_ops = ctrl->hpc_ops;  		slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); + +		snprintf(name, sizeof(name), "shpchp-%d", slot->number); +		slot->wq = alloc_workqueue(name, 0, 0); +		if (!slot->wq) { +			retval = -ENOMEM; +			goto error_info; +		} +  		mutex_init(&slot->lock);  		INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); @@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl)  		if (retval) {  			ctrl_err(ctrl, "pci_hp_register failed with error %d\n",  				 retval); -			goto error_info; +			goto error_slotwq;  		}  		get_power_status(hotplug_slot, &info->power_status); @@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl)  	}  	return 0; +error_slotwq: +	destroy_workqueue(slot->wq);  error_info:  	kfree(info);  error_hpslot: @@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl)  		slot = list_entry(tmp, struct slot, slot_list);  		list_del(&slot->slot_list);  		cancel_delayed_work(&slot->work); -		flush_workqueue(shpchp_wq); -		flush_workqueue(shpchp_ordered_wq); +		destroy_workqueue(slot->wq);  		pci_hp_deregister(slot->hotplug_slot);  	}  } @@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = {  static int __init shpcd_init(void)  { -	int retval = 0; - -	shpchp_wq = alloc_ordered_workqueue("shpchp", 0); -	if (!shpchp_wq) -		return -ENOMEM; - -	shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); -	if (!shpchp_ordered_wq) { -		destroy_workqueue(shpchp_wq); -		return -ENOMEM; -	} +	int retval;  	retval = pci_register_driver(&shpc_driver);  	dbg("%s: pci_register_driver = %d\n", __func__, retval);  	info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); -	if (retval) { -		destroy_workqueue(shpchp_ordered_wq); -		destroy_workqueue(shpchp_wq); -	} +  	return retval;  } @@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void)  {  	dbg("unload_shpchpd()\n");  	pci_unregister_driver(&shpc_driver); -	destroy_workqueue(shpchp_ordered_wq); -	destroy_workqueue(shpchp_wq);  	info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");  } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 
f9b5a52e411..58499277903 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)  	info->p_slot = p_slot;  	INIT_WORK(&info->work, interrupt_event_handler); -	queue_work(shpchp_wq, &info->work); +	queue_work(p_slot->wq, &info->work);  	return 0;  } @@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)  		kfree(info);  		goto out;  	} -	queue_work(shpchp_ordered_wq, &info->work); +	queue_work(p_slot->wq, &info->work);   out:  	mutex_unlock(&p_slot->lock);  } @@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot)  		p_slot->hpc_ops->green_led_blink(p_slot);  		p_slot->hpc_ops->set_attention_status(p_slot, 0); -		queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); +		queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);  		break;  	case BLINKINGOFF_STATE:  	case BLINKINGON_STATE: diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 6c8bc580978..fde4a32a029 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -82,4 +82,4 @@ endchoice  config PCIE_PME  	def_bool y -	depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI +	depends on PCIEPORTBUS && PM_RUNTIME && ACPI diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 421bbc5fee3..564d97f94b6 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work)  			continue;  		}  		do_recovery(pdev, entry.severity); +		pci_dev_put(pdev);  	}  }  #endif diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b52630b8ead..8474b6a4fc9 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus)  {  	struct pci_dev *child; +	if (aspm_force) +		return; +  	/*  	 * Clear any ASPM setup that the firmware has carried out on this bus  	 */ diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 7c0fd9252e6..84954a726a9 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)  static void pci_stop_dev(struct pci_dev *dev)  { +	pci_pme_active(dev, false); +  	if (dev->is_added) {  		pci_proc_detach_device(dev);  		pci_remove_sysfs_dev_files(dev); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index c31aeb01bb0..a5f3c8ca480 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -181,12 +181,11 @@ config PINCTRL_COH901  config PINCTRL_SAMSUNG  	bool -	depends on OF && GPIOLIB  	select PINMUX  	select PINCONF -config PINCTRL_EXYNOS4 -	bool "Pinctrl driver data for Exynos4 SoC" +config PINCTRL_EXYNOS +	bool "Pinctrl driver data for Samsung EXYNOS SoCs"  	depends on OF && GPIOLIB  	select PINCTRL_SAMSUNG diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index fc4606f27dc..6e87e52eab5 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30)	+= pinctrl-tegra30.o  obj-$(CONFIG_PINCTRL_U300)	+= pinctrl-u300.o  obj-$(CONFIG_PINCTRL_COH901)	+= pinctrl-coh901.o  obj-$(CONFIG_PINCTRL_SAMSUNG)	+= pinctrl-samsung.o -obj-$(CONFIG_PINCTRL_EXYNOS4)	+= pinctrl-exynos.o +obj-$(CONFIG_PINCTRL_EXYNOS)	+= pinctrl-exynos.o  obj-$(CONFIG_PINCTRL_EXYNOS5440)	+= pinctrl-exynos5440.o  obj-$(CONFIG_PINCTRL_XWAY)	+= pinctrl-xway.o  obj-$(CONFIG_PINCTRL_LANTIQ)	+= pinctrl-lantiq.o 
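
Note on the PCI hotplug hunks earlier in this patch: the pciehp and shpchp changes drop the driver-wide workqueues (pciehp_wq, shpchp_wq/shpchp_ordered_wq) and give each slot its own queue, allocated in the slot init path and destroyed in the slot cleanup path. The sketch below, placed here at the nearest file boundary, is only an illustration of that per-slot lifecycle under simplified assumptions; the demo_* names and the cut-down struct are invented for the example and are not the drivers' real types, which additionally wire the queue into the button and interrupt event handlers visible in the diff.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative per-slot state; the real struct slot carries far more. */
struct demo_slot {
	struct workqueue_struct *wq;	/* per-slot queue replacing the global one */
	struct delayed_work work;	/* e.g. push-button debounce work */
	unsigned int number;
};

static void demo_pushbutton_work(struct work_struct *work)
{
	struct demo_slot *slot =
		container_of(to_delayed_work(work), struct demo_slot, work);

	/* act on the button press for this slot only */
	pr_info("push-button event on slot %u\n", slot->number);
}

static struct demo_slot *demo_init_slot(unsigned int slot_nr)
{
	struct demo_slot *slot;
	char name[32];

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return NULL;

	/* one named workqueue per slot, as in pcie_init_slot()/init_slots() */
	snprintf(name, sizeof(name), "demo-slot-%u", slot_nr);
	slot->wq = alloc_workqueue(name, 0, 0);
	if (!slot->wq) {
		kfree(slot);
		return NULL;
	}

	slot->number = slot_nr;
	INIT_DELAYED_WORK(&slot->work, demo_pushbutton_work);
	return slot;
}

static void demo_button_pressed(struct demo_slot *slot)
{
	/* debounce on the slot's own queue instead of a shared global one */
	queue_delayed_work(slot->wq, &slot->work, 5 * HZ);
}

static void demo_cleanup_slot(struct demo_slot *slot)
{
	/*
	 * Mirror pcie_cleanup_slot(): cancel pending work, then destroy the
	 * per-slot queue (which drains it) instead of flushing a global queue.
	 */
	cancel_delayed_work(&slot->work);
	destroy_workqueue(slot->wq);
	kfree(slot);
}

The design point, as reflected in the diff, is that teardown of one slot no longer has to flush or destroy a queue shared by every slot in the system.
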
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c index 69aba369728..428ea96a94d 100644 --- a/drivers/pinctrl/mvebu/pinctrl-dove.c +++ b/drivers/pinctrl/mvebu/pinctrl-dove.c @@ -588,7 +588,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)  {  	const struct of_device_id *match =  		of_match_device(dove_pinctrl_of_match, &pdev->dev); -	pdev->dev.platform_data = match->data; +	pdev->dev.platform_data = (void *)match->data;  	/*  	 * General MPP Configuration Register is part of pdma registers. diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c index f12084e1805..cdd483df673 100644 --- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c +++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c @@ -66,9 +66,9 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {  		MPP_VAR_FUNCTION(0x5, "sata0", "act",    V(0, 1, 1, 1, 1, 0)),  		MPP_VAR_FUNCTION(0xb, "lcd", "vsync",    V(0, 0, 0, 0, 1, 0))),  	MPP_MODE(6, -		MPP_VAR_FUNCTION(0x0, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)), -		MPP_VAR_FUNCTION(0x1, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)), -		MPP_VAR_FUNCTION(0x2, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))), +		MPP_VAR_FUNCTION(0x1, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)), +		MPP_VAR_FUNCTION(0x2, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)), +		MPP_VAR_FUNCTION(0x3, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))),  	MPP_MODE(7,  		MPP_VAR_FUNCTION(0x0, "gpo", NULL,       V(1, 1, 1, 1, 1, 1)),  		MPP_VAR_FUNCTION(0x1, "pex", "rsto",     V(1, 1, 1, 1, 0, 1)), @@ -458,7 +458,7 @@ static int kirkwood_pinctrl_probe(struct platform_device *pdev)  {  	const struct of_device_id *match =  		of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); -	pdev->dev.platform_data = match->data; +	pdev->dev.platform_data = (void *)match->data;  	return mvebu_pinctrl_probe(pdev);  } diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c index de05b64f0da..142729914c3 100644 --- a/drivers/pinctrl/pinctrl-exynos5440.c +++ b/drivers/pinctrl/pinctrl-exynos5440.c @@ -599,7 +599,7 @@ static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offse  }  /* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */ -static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev, +static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,  			struct device_node *cfg_np, unsigned int **pin_list,  			unsigned int *npins)  { @@ -630,7 +630,7 @@ static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,   * Parse the information about all the available pin groups and pin functions   * from device node of the pin-controller.   
*/ -static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev, +static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,  				struct exynos5440_pinctrl_priv_data *priv)  {  	struct device *dev = &pdev->dev; @@ -723,7 +723,7 @@ static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,  }  /* register the pinctrl interface with the pinctrl subsystem */ -static int __init exynos5440_pinctrl_register(struct platform_device *pdev, +static int exynos5440_pinctrl_register(struct platform_device *pdev,  				struct exynos5440_pinctrl_priv_data *priv)  {  	struct device *dev = &pdev->dev; @@ -798,7 +798,7 @@ static int __init exynos5440_pinctrl_register(struct platform_device *pdev,  }  /* register the gpiolib interface with the gpiolib subsystem */ -static int __init exynos5440_gpiolib_register(struct platform_device *pdev, +static int exynos5440_gpiolib_register(struct platform_device *pdev,  				struct exynos5440_pinctrl_priv_data *priv)  {  	struct gpio_chip *gc; @@ -831,7 +831,7 @@ static int __init exynos5440_gpiolib_register(struct platform_device *pdev,  }  /* unregister the gpiolib interface with the gpiolib subsystem */ -static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev, +static int exynos5440_gpiolib_unregister(struct platform_device *pdev,  				struct exynos5440_pinctrl_priv_data *priv)  {  	int ret = gpiochip_remove(priv->gc); diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c index dd227d21dcf..23af9f1f9c3 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/pinctrl-mxs.c @@ -146,7 +146,7 @@ free:  static void mxs_dt_free_map(struct pinctrl_dev *pctldev,  			    struct pinctrl_map *map, unsigned num_maps)  { -	int i; +	u32 i;  	for (i = 0; i < num_maps; i++) {  		if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) @@ -203,7 +203,7 @@ static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,  	void __iomem *reg;  	u8 bank, shift;  	u16 pin; -	int i; +	u32 i;  	for (i = 0; i < g->npins; i++) {  		bank = PINID_TO_BANK(g->pins[i]); @@ -256,7 +256,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,  	void __iomem *reg;  	u8 ma, vol, pull, bank, shift;  	u16 pin; -	int i; +	u32 i;  	ma = CONFIG_TO_MA(config);  	vol = CONFIG_TO_VOL(config); @@ -345,8 +345,7 @@ static int mxs_pinctrl_parse_group(struct platform_device *pdev,  	const char *propname = "fsl,pinmux-ids";  	char *group;  	int length = strlen(np->name) + SUFFIX_LEN; -	int i; -	u32 val; +	u32 val, i;  	group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);  	if (!group) diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index 1bb16ffb4e4..5767b18ebdf 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -676,7 +676,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)  }  EXPORT_SYMBOL(nmk_gpio_set_mode); -static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) +static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)  {  	int i;  	u16 reg; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index f6a360b86eb..5c32e880bcb 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -30,7 +30,6 @@  #define PCS_MUX_BITS_NAME		"pinctrl-single,bits"  #define PCS_REG_NAME_LEN		((sizeof(unsigned long) * 2) + 1)  #define PCS_OFF_DISABLED		~0U -#define PCS_MAX_GPIO_VALUES		2  /**   * struct pcs_pingroup - pingroups for a function @@ 
-78,16 +77,6 @@ struct pcs_function {  };  /** - * struct pcs_gpio_range - pinctrl gpio range - * @range:	subrange of the GPIO number space - * @gpio_func:	gpio function value in the pinmux register - */ -struct pcs_gpio_range { -	struct pinctrl_gpio_range range; -	int gpio_func; -}; - -/**   * struct pcs_data - wrapper for data needed by pinctrl framework   * @pa:		pindesc array   * @cur:	index to current element @@ -414,26 +403,9 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,  }  static int pcs_request_gpio(struct pinctrl_dev *pctldev, -			    struct pinctrl_gpio_range *range, unsigned pin) +			struct pinctrl_gpio_range *range, unsigned offset)  { -	struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); -	struct pcs_gpio_range *gpio = NULL; -	int end, mux_bytes; -	unsigned data; - -	gpio = container_of(range, struct pcs_gpio_range, range); -	end = range->pin_base + range->npins - 1; -	if (pin < range->pin_base || pin > end) { -		dev_err(pctldev->dev, -			"pin %d isn't in the range of %d to %d\n", -			pin, range->pin_base, end); -		return -EINVAL; -	} -	mux_bytes = pcs->width / BITS_PER_BYTE; -	data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask; -	data |= gpio->gpio_func; -	pcs->write(data, pcs->base + pin * mux_bytes); -	return 0; +	return -ENOTSUPP;  }  static struct pinmux_ops pcs_pinmux_ops = { @@ -907,49 +879,6 @@ static void pcs_free_resources(struct pcs_device *pcs)  static struct of_device_id pcs_of_match[]; -static int pcs_add_gpio_range(struct device_node *node, struct pcs_device *pcs) -{ -	struct pcs_gpio_range *gpio; -	struct device_node *child; -	struct resource r; -	const char name[] = "pinctrl-single"; -	u32 gpiores[PCS_MAX_GPIO_VALUES]; -	int ret, i = 0, mux_bytes = 0; - -	for_each_child_of_node(node, child) { -		ret = of_address_to_resource(child, 0, &r); -		if (ret < 0) -			continue; -		memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES); -		ret = of_property_read_u32_array(child, "pinctrl-single,gpio", -						 gpiores, PCS_MAX_GPIO_VALUES); -		if (ret < 0) -			continue; -		gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL); -		if (!gpio) { -			dev_err(pcs->dev, "failed to allocate pcs gpio\n"); -			return -ENOMEM; -		} -		gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name), -						GFP_KERNEL); -		if (!gpio->range.name) { -			dev_err(pcs->dev, "failed to allocate range name\n"); -			return -ENOMEM; -		} -		memcpy((char *)gpio->range.name, name, sizeof(name)); - -		gpio->range.id = i++; -		gpio->range.base = gpiores[0]; -		gpio->gpio_func = gpiores[1]; -		mux_bytes = pcs->width / BITS_PER_BYTE; -		gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes; -		gpio->range.npins = (r.end - r.start) / mux_bytes + 1; - -		pinctrl_add_gpio_range(pcs->pctl, &gpio->range); -	} -	return 0; -} -  static int pcs_probe(struct platform_device *pdev)  {  	struct device_node *np = pdev->dev.of_node; @@ -1046,10 +975,6 @@ static int pcs_probe(struct platform_device *pdev)  		goto free;  	} -	ret = pcs_add_gpio_range(np, pcs); -	if (ret < 0) -		goto free; -  	dev_info(pcs->dev, "%i pins at pa %p size %u\n",  		 pcs->desc.npins, pcs->base, pcs->size); diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 498b2ba905d..d02498b30c6 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)  	return of_iomap(np, 0);  } +static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, +       const struct of_phandle_args 
*gpiospec, +       u32 *flags) +{ +       if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) +               return -EINVAL; + +       if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) +               return -EINVAL; + +       if (flags) +               *flags = gpiospec->args[1]; + +       return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; +} +  static int sirfsoc_pinmux_probe(struct platform_device *pdev)  {  	int ret; @@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)  		bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;  		bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);  		bank->chip.gc.of_node = np; +		bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; +		bank->chip.gc.of_gpio_n_cells = 2;  		bank->chip.regs = regs;  		bank->id = i;  		bank->is_marco = is_marco; diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 7481146a5b4..97c2be195ef 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {  	if (force)  		pr_warn("module loaded by force\n");  	/* first ensure that we are running on IBM HW */ -	else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) +	else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))  		return -ENODEV;  	/* Get the address for the Extended BIOS Data Area */ diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 71623a2ff3e..d1f03005317 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -26,6 +26,7 @@  #include <linux/seq_file.h>  #include <linux/debugfs.h>  #include <linux/ctype.h> +#include <linux/efi.h>  #include <acpi/video.h>  /* @@ -1544,6 +1545,9 @@ static int __init samsung_init(void)  	struct samsung_laptop *samsung;  	int ret; +	if (efi_enabled(EFI_BOOT)) +		return -ENODEV; +  	quirks = &samsung_unknown;  	if (!force && !dmi_check_system(samsung_dmi_table))  		return -ENODEV; diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c index 261f3d2299b..89bd2faaef8 100644 --- a/drivers/regulator/dbx500-prcmu.c +++ b/drivers/regulator/dbx500-prcmu.c @@ -14,6 +14,7 @@  #include <linux/debugfs.h>  #include <linux/seq_file.h>  #include <linux/slab.h> +#include <linux/module.h>  #include "dbx500-prcmu.h" diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index b85040caaea..cca18a3c029 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c @@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = {  };  #ifdef CONFIG_OF -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max77686_platform_data *pdata)  { +	struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);  	struct device_node *pmic_np, *regulators_np;  	struct max77686_regulator_data *rdata;  	struct of_regulator_match rmatch; @@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  	pmic_np = iodev->dev->of_node;  	regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");  	if (!regulators_np) { -		dev_err(iodev->dev, "could not find regulators sub-node\n"); +		dev_err(&pdev->dev, "could not find regulators sub-node\n");  		return -EINVAL;  	}  	pdata->num_regulators = ARRAY_SIZE(regulators); -	rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * +	rdata = devm_kzalloc(&pdev->dev, 
sizeof(*rdata) *  			     pdata->num_regulators, GFP_KERNEL);  	if (!rdata) { -		dev_err(iodev->dev, +		dev_err(&pdev->dev,  			"could not allocate memory for regulator data\n");  		return -ENOMEM;  	} @@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  		rmatch.name = regulators[i].name;  		rmatch.init_data = NULL;  		rmatch.of_node = NULL; -		of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); +		of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);  		rdata[i].initdata = rmatch.init_data;  		rdata[i].of_node = rmatch.of_node;  	} @@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,  	return 0;  }  #else -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max77686_platform_data *pdata)  {  	return 0; @@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)  	}  	if (iodev->dev->of_node) { -		ret = max77686_pmic_dt_parse_pdata(iodev, pdata); +		ret = max77686_pmic_dt_parse_pdata(pdev, pdata);  		if (ret)  			return ret;  	} diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index d1a77512d83..d40cf7fdb54 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)  		return -EINVAL;  	} -	ret = of_regulator_match(pdev->dev.parent, regulators, -				 max8907_matches, +	ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,  				 ARRAY_SIZE(max8907_matches));  	if (ret < 0) {  		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 02be7fcae32..836908ce505 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = {  };  #ifdef CONFIG_OF -static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,  			struct max8997_platform_data *pdata,  			struct device_node *pmic_np)  { @@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,  		gpio = of_get_named_gpio(pmic_np,  					"max8997,pmic-buck125-dvs-gpios", i);  		if (!gpio_is_valid(gpio)) { -			dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); +			dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);  			return -EINVAL;  		}  		pdata->buck125_gpios[i] = gpio; @@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,  	return 0;  } -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max8997_platform_data *pdata)  { +	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);  	struct device_node *pmic_np, *regulators_np, *reg_np;  	struct max8997_regulator_data *rdata;  	unsigned int i, dvs_voltage_nr = 1, ret;  	pmic_np = iodev->dev->of_node;  	if (!pmic_np) { -		dev_err(iodev->dev, "could not find pmic sub-node\n"); +		dev_err(&pdev->dev, "could not find pmic sub-node\n");  		return -ENODEV;  	}  	regulators_np = of_find_node_by_name(pmic_np, "regulators");  	if (!regulators_np) { -		dev_err(iodev->dev, "could not find regulators sub-node\n"); +		dev_err(&pdev->dev, "could not find regulators sub-node\n");  		return -EINVAL;  	} @@ -976,11 +977,10 @@ 
static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	for_each_child_of_node(regulators_np, reg_np)  		pdata->num_regulators++; -	rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * +	rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *  				pdata->num_regulators, GFP_KERNEL);  	if (!rdata) { -		dev_err(iodev->dev, "could not allocate memory for " -						"regulator data\n"); +		dev_err(&pdev->dev, "could not allocate memory for regulator data\n");  		return -ENOMEM;  	} @@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  				break;  		if (i == ARRAY_SIZE(regulators)) { -			dev_warn(iodev->dev, "don't know how to configure " -				"regulator %s\n", reg_np->name); +			dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", +				 reg_np->name);  			continue;  		}  		rdata->id = i; -		rdata->initdata = of_get_regulator_init_data( -						iodev->dev, reg_np); +		rdata->initdata = of_get_regulator_init_data(&pdev->dev, +							     reg_np);  		rdata->reg_node = reg_np;  		rdata++;  	} @@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||  						pdata->buck5_gpiodvs) { -		ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); +		ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);  		if (ret)  			return -EINVAL; @@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  		} else {  			if (pdata->buck125_default_idx >= 8) {  				pdata->buck125_default_idx = 0; -				dev_info(iodev->dev, "invalid value for " -				"default dvs index, using 0 instead\n"); +				dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");  			}  		} @@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck1-dvs-voltage",  				pdata->buck1_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck1 voltages not specified\n"); +		dev_err(&pdev->dev, "buck1 voltages not specified\n");  		return -EINVAL;  	}  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck2-dvs-voltage",  				pdata->buck2_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck2 voltages not specified\n"); +		dev_err(&pdev->dev, "buck2 voltages not specified\n");  		return -EINVAL;  	}  	if (of_property_read_u32_array(pmic_np,  				"max8997,pmic-buck5-dvs-voltage",  				pdata->buck5_voltage, dvs_voltage_nr)) { -		dev_err(iodev->dev, "buck5 voltages not specified\n"); +		dev_err(&pdev->dev, "buck5 voltages not specified\n");  		return -EINVAL;  	}  	return 0;  }  #else -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,  					struct max8997_platform_data *pdata)  {  	return 0; @@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)  	}  	if (iodev->dev->of_node) { -		ret = max8997_pmic_dt_parse_pdata(iodev, pdata); +		ret = max8997_pmic_dt_parse_pdata(pdev, pdata);  		if (ret)  			return ret;  	} diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 1f0df4046b8..0a8dd1cbee6 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c @@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {  	.min = 2800000,	.step = 100000,	.max = 3100000,  };  static const struct voltage_map_desc ldo10_voltage_map_desc = { -	.min = 95000,	.step = 50000,	.max = 1300000, +	.min = 950000,	.step = 
50000,	.max = 1300000,  };  static const struct voltage_map_desc ldo1213_voltage_map_desc = {  	.min = 800000,	.step = 100000,	.max = 3300000, diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 6f684916fd7..66ca769287a 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,  	if (!dev || !node)  		return -EINVAL; +	for (i = 0; i < num_matches; i++) { +		struct of_regulator_match *match = &matches[i]; +		match->init_data = NULL; +		match->of_node = NULL; +	} +  	for_each_child_of_node(node, child) {  		name = of_get_property(child,  					"regulator-compatible", NULL); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index bd062a2ffbe..cd9ea2ea182 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {  	.min_uV		= S2MPS11_BUCK_MIN2,			\  	.uV_step	= S2MPS11_BUCK_STEP2,			\  	.n_voltages	= S2MPS11_BUCK_N_VOLTAGES,		\ -	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\ +	.vsel_reg	= S2MPS11_REG_B10CTRL2,			\  	.vsel_mask	= S2MPS11_BUCK_VSEL_MASK,		\ -	.enable_reg	= S2MPS11_REG_B9CTRL1,			\ +	.enable_reg	= S2MPS11_REG_B10CTRL1,			\  	.enable_mask	= S2MPS11_ENABLE_MASK			\  } diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index 73dce766412..df395187c06 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c @@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)  	if (!regs)  		return NULL; -	count = of_regulator_match(pdev->dev.parent, regs, -				reg_matches, TPS65217_NUM_REGULATOR); +	count = of_regulator_match(&pdev->dev, regs, reg_matches, +				   TPS65217_NUM_REGULATOR);  	of_node_put(regs);  	if ((count < 0) || (count > TPS65217_NUM_REGULATOR))  		return NULL; diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 59c3770fa77..b0e4c0bc85c 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(  		return NULL;  	} -	ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); +	ret = of_regulator_match(&pdev->dev, regulators, matches, count);  	if (ret < 0) {  		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",  			ret); diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c index b15d711bc8c..9019d0e7ecb 100644 --- a/drivers/regulator/tps80031-regulator.c +++ b/drivers/regulator/tps80031-regulator.c @@ -728,7 +728,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)  			}  		}  		rdev = regulator_register(&ri->rinfo->desc, &config); -		if (IS_ERR_OR_NULL(rdev)) { +		if (IS_ERR(rdev)) {  			dev_err(&pdev->dev,  				"register regulator failed %s\n",  					ri->rinfo->desc.name); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index afb7cfa85cc..c016ad81767 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)  {  	unsigned long timeout = jiffies + msecs_to_jiffies(1000);  	struct i2c_client *client = data; +	struct rtc_device *rtc = i2c_get_clientdata(client);  	int handled = 0, sr, err;  	/* @@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)  	if (sr & 
ISL1208_REG_SR_ALM) {  		dev_dbg(&client->dev, "alarm!\n"); +		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); +  		/* Clear the alarm */  		sr &= ~ISL1208_REG_SR_ALM;  		sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 08378e3cc21..81c5077feff 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -44,6 +44,7 @@  #define RTC_YMR		0x34	/* Year match register */  #define RTC_YLR		0x38	/* Year data load register */ +#define RTC_CR_EN	(1 << 0)	/* counter enable bit */  #define RTC_CR_CWEN	(1 << 26)	/* Clockwatch enable bit */  #define RTC_TCR_EN	(1 << 1) /* Periodic timer enable bit */ @@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)  	struct pl031_local *ldata;  	struct pl031_vendor_data *vendor = id->data;  	struct rtc_class_ops *ops = &vendor->ops; -	unsigned long time; +	unsigned long time, data;  	ret = amba_request_regions(adev, NULL);  	if (ret) @@ -345,10 +346,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)  	dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));  	dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); +	data = readl(ldata->base + RTC_CR);  	/* Enable the clockwatch on ST Variants */  	if (vendor->clockwatch) -		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, -		       ldata->base + RTC_CR); +		data |= RTC_CR_CWEN; +	else +		data |= RTC_CR_EN; +	writel(data, ldata->base + RTC_CR);  	/*  	 * On ST PL031 variants, the RTC reset value does not provide correct diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index 00c930f4b6f..2730533e2d2 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)  		return -EINVAL;  	} -	writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) +	writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)  		| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)  		| (bin2bcd(tm->tm_mday))  		| ((tm->tm_year >= 200) << DATE_CENTURY_S), diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index d73fdcfeb45..2839baa82a5 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -633,7 +633,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)  		return -ENOMEM;  	pci_set_drvdata(pdev, pci_info); -	if (efi_enabled) +	if (efi_enabled(EFI_RUNTIME_SERVICES))  		orom = isci_get_efi_var(pdev);  	if (!orom) diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 97ac0a38e3d..eb2753008ef 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)  	return -1;  } + +int ssb_gpio_unregister(struct ssb_bus *bus) +{ +	if (ssb_chipco_available(&bus->chipco) || +	    ssb_extif_available(&bus->extif)) { +		return gpiochip_remove(&bus->gpio); +	} else { +		SSB_WARN_ON(1); +	} + +	return -1; +} diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 772ad9b5c30..24dc331b470 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)  void ssb_bus_unregister(struct ssb_bus *bus)  { +	int err; + +	err = ssb_gpio_unregister(bus); +	if (err == -EBUSY) +		ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); +	else if (err) +		ssb_dprintk(KERN_ERR PFX +			    "Can not unregister GPIO driver: %i\n", err); +  	ssb_buses_lock();  	ssb_devices_unregister(bus);  	
list_del(&bus->list); diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 6c10b66c796..da38305a2d2 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)  #ifdef CONFIG_SSB_DRIVER_GPIO  extern int ssb_gpio_init(struct ssb_bus *bus); +extern int ssb_gpio_unregister(struct ssb_bus *bus);  #else /* CONFIG_SSB_DRIVER_GPIO */  static inline int ssb_gpio_init(struct ssb_bus *bus)  {  	return -ENOTSUPP;  } +static inline int ssb_gpio_unregister(struct ssb_bus *bus) +{ +	return 0; +}  #endif /* CONFIG_SSB_DRIVER_GPIO */  #endif /* LINUX_SSB_PRIVATE_H_ */ diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index fb31b457a56..c5ceb9d90ea 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)  	struct mxs_lradc *lradc = iio_priv(iio);  	const uint32_t chan_value = LRADC_CH_ACCUMULATE |  		((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); -	int i, j = 0; +	unsigned int i, j = 0;  	for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {  		lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c index 3525a68d6a7..41d7350d030 100644 --- a/drivers/staging/iio/gyro/adis16080_core.c +++ b/drivers/staging/iio/gyro/adis16080_core.c @@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev,  	ret = spi_read(st->us, st->buf, 2);  	if (ret == 0) -		*val = ((st->buf[0] & 0xF) << 8) | st->buf[1]; +		*val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11);  	mutex_unlock(&st->buf_lock);  	return ret; diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig index b724a413143..09f65dc3d2c 100644 --- a/drivers/staging/omapdrm/Kconfig +++ b/drivers/staging/omapdrm/Kconfig @@ -3,8 +3,8 @@ config DRM_OMAP  	tristate "OMAP DRM"  	depends on DRM && !CONFIG_FB_OMAP2  	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM +	depends on OMAP2_DSS  	select DRM_KMS_HELPER -	select OMAP2_DSS  	select FB_SYS_FILLRECT  	select FB_SYS_COPYAREA  	select FB_SYS_IMAGEBLIT diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c index 131afd0c460..9464f387434 100644 --- a/drivers/staging/sb105x/sb_pci_mp.c +++ b/drivers/staging/sb105x/sb_pci_mp.c @@ -3054,7 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)  				sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);  			}  			break; -#ifdef CONFIG_PARPORT +#ifdef CONFIG_PARPORT_PC  		case PCI_DEVICE_ID_MP2S1P :  			sbdev->nr_ports = 2; diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index 6b2ec390e77..806cbf72fb5 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h @@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {  } SRSNCapObject, *PSRSNCapObject;  // BSS info(AP) -#pragma pack(1)  typedef struct tagKnownBSS {      // BSS info      BOOL            bActive; diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index 5d8faf9f96e..e0d2b07ba60 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h @@ -34,7 +34,6 @@  #include "device.h"  /*---------------------  Export Definitions -------------------------*/ -#pragma pack(1)  typedef struct tagSINTData {  	BYTE byTSR0;  	BYTE byPkt0; diff --git 
a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef751..ae6e2d237b2 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h @@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {  // Ioctl interface structure  // Command structure  // -#pragma pack(1)  typedef struct tagSCmdRequest {  	u8 name[16];  	void	*data;  	u16	    wResult;  	u16     wCmdCode; -} SCmdRequest, *PSCmdRequest; +} __packed SCmdRequest, *PSCmdRequest;  //  // Scan @@ -111,7 +110,7 @@ typedef struct tagSCmdScan {      u8	    ssid[SSID_MAXLEN + 2]; -} SCmdScan, *PSCmdScan; +} __packed SCmdScan, *PSCmdScan;  //  // BSS Join @@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {      BOOL    bPSEnable;      BOOL    bShareKeyAuth; -} SCmdBSSJoin, *PSCmdBSSJoin; +} __packed SCmdBSSJoin, *PSCmdBSSJoin;  //  // Zonetype Setting @@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {   BOOL       bWrite;   WZONETYPE  ZoneType; -} SCmdZoneTypeSet, *PSCmdZoneTypeSet; +} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;  typedef struct tagSWPAResult {           char	ifname[100]; @@ -145,7 +144,7 @@ typedef struct tagSWPAResult {  	u8 key_mgmt;  	u8 eap_type;           BOOL authenticated; -} SWPAResult, *PSWPAResult; +} __packed SWPAResult, *PSWPAResult;  typedef struct tagSCmdStartAP { @@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {      BOOL    bShareKeyAuth;      u8      byBasicRate; -} SCmdStartAP, *PSCmdStartAP; +} __packed SCmdStartAP, *PSCmdStartAP;  typedef struct tagSCmdSetWEP { @@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {      BOOL    bWepKeyAvailable[WEP_NKEYS];      u32     auWepKeyLength[WEP_NKEYS]; -} SCmdSetWEP, *PSCmdSetWEP; +} __packed SCmdSetWEP, *PSCmdSetWEP;  typedef struct tagSBSSIDItem { @@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {      BOOL    bWEPOn;      u32     uRSSI; -} SBSSIDItem; +} __packed SBSSIDItem;  typedef struct tagSBSSIDList {  	u32		    uItem;  	SBSSIDItem	sBSSIDList[0]; -} SBSSIDList, *PSBSSIDList; +} __packed SBSSIDList, *PSBSSIDList;  typedef struct tagSNodeItem { @@ -208,7 +207,7 @@ typedef struct tagSNodeItem {      u32            uTxAttempts;      u16            wFailureRatio; -} SNodeItem; +} __packed SNodeItem;  typedef struct tagSNodeList { @@ -216,7 +215,7 @@ typedef struct tagSNodeList {  	u32		    uItem;  	SNodeItem	sNodeList[0]; -} SNodeList, *PSNodeList; +} __packed SNodeList, *PSNodeList;  typedef struct tagSCmdLinkStatus { @@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {      u32     uChannel;      u32     uLinkRate; -} SCmdLinkStatus, *PSCmdLinkStatus; +} __packed SCmdLinkStatus, *PSCmdLinkStatus;  //  // 802.11 counter @@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {      u32 ReceivedFragmentCount;      u32 MulticastReceivedFrameCount;      u32 FCSErrorCount; -} SDot11MIBCount, *PSDot11MIBCount; +} __packed SDot11MIBCount, *PSDot11MIBCount; @@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {      u32   ullTxBroadcastBytes[2];      u32   ullTxMulticastBytes[2];      u32   ullTxDirectedBytes[2]; -} SStatMIBCount, *PSStatMIBCount; +} __packed SStatMIBCount, *PSStatMIBCount;  typedef struct tagSCmdValue {      u32     dwValue; -} SCmdValue,  *PSCmdValue; +} __packed SCmdValue,  *PSCmdValue;  //  // hostapd & viawget ioctl related @@ -431,7 +430,7 @@ struct viawget_hostapd_param {  			u8 ssid[32];  		} scan_req;  	} u; -}; +} __packed;  /*---------------------  Export Classes  ----------------------------*/ diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 
959c8868f6e..2522ddec718 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h @@ -67,12 +67,11 @@ enum { -#pragma pack(1)  typedef struct viawget_wpa_header {  	u8 type;  	u16 req_ie_len;  	u16 resp_ie_len; -} viawget_wpa_header; +} __packed viawget_wpa_header;  struct viawget_wpa_param {  	u32 cmd; @@ -113,9 +112,8 @@ struct viawget_wpa_param {  			u8 *buf;  		} scan_results;  	} u; -}; +} __packed; -#pragma pack(1)  struct viawget_scan_result {  	u8 bssid[6];  	u8 ssid[32]; @@ -130,7 +128,7 @@ struct viawget_scan_result {  	int noise;  	int level;  	int maxrate; -}; +} __packed;  /*---------------------  Export Classes  ----------------------------*/ diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 4efa9bc0fcf..89bfd858bb2 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)  	/* SSID */  	req->ssid.status = P80211ENUM_msgitem_status_data_ok;  	req->ssid.data.len = le16_to_cpu(item->ssid.len); -	req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); +	req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);  	memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);  	/* supported rates */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e2695101bb9..f2aa7543d20 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)  int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)  { +	int block_size = dev->dev_attrib.block_size; +  	if (dev->export_count) {  		pr_err("dev[%p]: Unable to change SE Device"  			" fabric_max_sectors while export_count is %d\n", @@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)  	/*  	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()  	 */ +	if (!block_size) { +		block_size = 512; +		pr_warn("Defaulting to 512 for zero block_size\n"); +	}  	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, -						      dev->dev_attrib.block_size); +						      block_size);  	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;  	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 810263dfa4a..c57bbbc7a7d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -754,6 +754,11 @@ static int target_fabric_port_link(  		return -EFAULT;  	} +	if (!(dev->dev_flags & DF_CONFIGURED)) { +		pr_err("se_device not configured yet, cannot port link\n"); +		return -ENODEV; +	} +  	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;  	se_tpg = container_of(to_config_group(tpg_ci),  				struct se_portal_group, tpg_group); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 26a6d183ccb..a664c664a31 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)  	buf[7] = dev->dev_attrib.block_size & 0xff;  	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	
transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	target_complete_cmd(cmd, GOOD);  	return 0; @@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)  		buf[14] = 0x80;  	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	target_complete_cmd(cmd, GOOD);  	return 0; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 84f9e96e8ac..2d88f087d96 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)  out:  	rbuf = transport_kmap_data_sg(cmd); -	if (!rbuf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - -	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); -	transport_kunmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); +		transport_kunmap_data_sg(cmd); +	}  	if (!ret)  		target_complete_cmd(cmd, GOOD); @@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  {  	struct se_device *dev = cmd->se_dev;  	char *cdb = cmd->t_task_cdb; -	unsigned char *buf, *map_buf; +	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;  	int type = dev->transport->get_device_type(dev);  	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);  	bool dbd = !!(cdb[1] & 0x08); @@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  	int ret;  	int i; -	map_buf = transport_kmap_data_sg(cmd); -	if (!map_buf) -		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -	/* -	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we -	 * know we actually allocated a full page.  Otherwise, if the -	 * data buffer is too small, allocate a temporary buffer so we -	 * don't have to worry about overruns in all our INQUIRY -	 * emulation handling. -	 */ -	if (cmd->data_length < SE_MODE_PAGE_BUF && -	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { -		buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); -		if (!buf) { -			transport_kunmap_data_sg(cmd); -			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -		} -	} else { -		buf = map_buf; -	} +	memset(buf, 0, SE_MODE_PAGE_BUF); +  	/*  	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for  	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 
@@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  	if (page == 0x3f) {  		if (subpage != 0x00 && subpage != 0xff) {  			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); -			kfree(buf); -			transport_kunmap_data_sg(cmd);  			return TCM_INVALID_CDB_FIELD;  		} @@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)  		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",  		       page, subpage); -	transport_kunmap_data_sg(cmd);  	return TCM_UNKNOWN_MODE_PAGE;  set_length: @@ -981,12 +959,12 @@ set_length:  	else  		buf[0] = length - 1; -	if (buf != map_buf) { -		memcpy(map_buf, buf, cmd->data_length); -		kfree(buf); +	rbuf = transport_kmap_data_sg(cmd); +	if (rbuf) { +		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); +		transport_kunmap_data_sg(cmd);  	} -	transport_kunmap_data_sg(cmd);  	target_complete_cmd(cmd, GOOD);  	return 0;  } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index be6a373601b..79ff3a5e925 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,  		return pty_get_pktmode(tty, (int __user *)arg);  	case TIOCSIG:    /* Send signal to other side of pty */  		return pty_signal(tty, (int) arg); +	case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */ +		return -EINVAL;  	}  	return -ENOIOCTLCMD;  } diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c index d085e3a8ec0..f9320437a64 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250.c @@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = {  				  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,  		.flags		= UART_CAP_FIFO,  	}, +	[PORT_BRCM_TRUMANAGE] = { +		.name		= "TruManage", +		.fifo_size	= 1, +		.tx_loadsz	= 1024, +		.flags		= UART_CAP_HFIFO, +	},  	[PORT_8250_CIR] = {  		.name		= "CIR port"  	} @@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)  		port->icount.tx++;  		if (uart_circ_empty(xmit))  			break; +		if (up->capabilities & UART_CAP_HFIFO) { +			if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) != +			    BOTH_EMPTY) +				break; +		}  	} while (--count > 0);  	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 3b4ea84898c..12caa1292b7 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -40,6 +40,7 @@ struct serial8250_config {  #define UART_CAP_AFE	(1 << 11)	/* MCR-based hw flow control */  #define UART_CAP_UUE	(1 << 12)	/* UART needs IER bit 6 set (Xscale) */  #define UART_CAP_RTOIE	(1 << 13)	/* UART needs IER bit 4 set (Xscale, Tegra) */ +#define UART_CAP_HFIFO	(1 << 14)	/* UART has a "hidden" FIFO */  #define UART_BUG_QUOT	(1 << 0)	/* UART has buggy quot LSB */  #define UART_BUG_TXEN	(1 << 1)	/* UART has buggy TX IIR status */ diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 1d0dba2d562..096d2ef48b3 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)  	} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {  		/* Clear the USR and write the LCR again. 
*/  		(void)p->serial_in(p, UART_USR); -		p->serial_out(p, d->last_lcr, UART_LCR); +		p->serial_out(p, UART_LCR, d->last_lcr);  		return 1;  	} diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 26b9dc012ed..a27a98e1b06 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv,  	return setup_port(priv, port, 2, idx * 8, 0);  } +static int +pci_brcm_trumanage_setup(struct serial_private *priv, +			 const struct pciserial_board *board, +			 struct uart_8250_port *port, int idx) +{ +	int ret = pci_default_setup(priv, board, port, idx); + +	port->port.type = PORT_BRCM_TRUMANAGE; +	port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); +	return ret; +} +  static int skip_tx_en_setup(struct serial_private *priv,  			const struct pciserial_board *board,  			struct uart_8250_port *port, int idx) @@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv,  #define PCI_VENDOR_ID_AGESTAR		0x5372  #define PCI_DEVICE_ID_AGESTAR_9375	0x6872  #define PCI_VENDOR_ID_ASIX		0x9710 -#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019  #define PCI_DEVICE_ID_COMMTECH_4224PCIE	0x0020  #define PCI_DEVICE_ID_COMMTECH_4228PCIE	0x0021 +#define PCI_DEVICE_ID_COMMTECH_4222PCIE	0x0022 +#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a  /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ @@ -1954,6 +1967,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {  		.setup		= pci_xr17v35x_setup,  	},  	/* +	 * Broadcom TruManage (NetXtreme) +	 */ +	{ +		.vendor		= PCI_VENDOR_ID_BROADCOM, +		.device		= PCI_DEVICE_ID_BROADCOM_TRUMANAGE, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.setup		= pci_brcm_trumanage_setup, +	}, + +	/*  	 * Default "match everything" terminator entry  	 */  	{ @@ -2148,6 +2172,7 @@ enum pci_board_num_t {  	pbn_ce4100_1_115200,  	pbn_omegapci,  	pbn_NETMOS9900_2s_115200, +	pbn_brcm_trumanage,  };  /* @@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = {  	[pbn_b0_8_1152000_200] = {  		.flags		= FL_BASE0, -		.num_ports	= 2, +		.num_ports	= 8,  		.base_baud	= 1152000,  		.uart_offset	= 0x200,  	}, @@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = {  		.num_ports	= 2,  		.base_baud	= 115200,  	}, +	[pbn_brcm_trumanage] = { +		.flags		= FL_BASE0, +		.num_ports	= 1, +		.reg_shift	= 2, +		.base_baud	= 115200, +	},  };  static const struct pci_device_id blacklist[] = { @@ -4471,6 +4502,13 @@ static struct pci_device_id serial_pci_tbl[] = {  		pbn_omegapci },  	/* +	 * Broadcom TruManage +	 */ +	{	PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, +		PCI_ANY_ID, PCI_ANY_ID, 0, 0, +		pbn_brcm_trumanage }, + +	/*  	 * AgeStar as-prs2-009  	 */  	{	PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 675d94ab0af..8cb6d8d66a1 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port)  	clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);  	mrdy_set_low(ifx_dev); +	del_timer(&ifx_dev->spi_timer);  	clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);  	tasklet_kill(&ifx_dev->io_work_tasklet);  } @@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data)  		ifx_dev->spi_xfer.cs_change = 0;  		ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;  		/* ifx_dev->spi_xfer.speed_hz = 390625; 
*/ -		ifx_dev->spi_xfer.bits_per_word = spi_bpw; +		ifx_dev->spi_xfer.bits_per_word = +			ifx_dev->spi_dev->bits_per_word;  		ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;  		ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 6db23b035ef..e55615eb34a 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)  	struct circ_buf *xmit = &s->port.state->xmit;  	if (auart_dma_enabled(s)) { -		int i = 0; +		u32 i = 0;  		int size;  		void *buffer = s->tx_dma_buf; @@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)  	u32 ctrl = readl(u->membase + AUART_CTRL2); -	ctrl &= ~AUART_CTRL2_RTSEN; +	ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);  	if (mctrl & TIOCM_RTS) {  		if (tty_port_cts_enabled(&u->state->port))  			ctrl |= AUART_CTRL2_RTSEN; +		else +			ctrl |= AUART_CTRL2_RTS;  	}  	s->ctrl = mctrl; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 12e5249d053..e514b3a4dc5 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,  	ucon &= ucon_mask;  	wr_regl(port, S3C2410_UCON,  ucon | cfg->ucon); -	wr_regl(port, S3C2410_ULCON, cfg->ulcon);  	/* reset both fifos */  	wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 8fd181436a6..d5ed9f61300 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)  	vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;  	vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); -	if (vt8500_port->clk) { +	if (!IS_ERR(vt8500_port->clk)) {  		vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);  	} else {  		/* use the default of 24Mhz if not specified and warn */ diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 4225d5e7213..8e64adf8e4d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -39,6 +39,7 @@  #include <asm/unaligned.h>  #include <linux/platform_device.h>  #include <linux/workqueue.h> +#include <linux/pm_runtime.h>  #include <linux/usb.h>  #include <linux/usb/hcd.h> @@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd)  	return retval;  } +/* + * usb_hcd_start_port_resume - a root-hub port is sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal is + * being sent to a root-hub port.  The root hub will be prevented from + * going into autosuspend until usb_hcd_end_port_resume() is called. + * + * The bus's private lock must be held by the caller. 
+ */ +void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) +{ +	unsigned bit = 1 << portnum; + +	if (!(bus->resuming_ports & bit)) { +		bus->resuming_ports |= bit; +		pm_runtime_get_noresume(&bus->root_hub->dev); +	} +} +EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); + +/* + * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal has + * stopped being sent to a root-hub port.  The root hub will be allowed to + * autosuspend again. + * + * The bus's private lock must be held by the caller. + */ +void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) +{ +	unsigned bit = 1 << portnum; + +	if (bus->resuming_ports & bit) { +		bus->resuming_ports &= ~bit; +		pm_runtime_put_noidle(&bus->root_hub->dev); +	} +} +EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);  /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 957ed2c4148..cbf7168e3ce 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev)  EXPORT_SYMBOL_GPL(usb_enable_ltm);  #ifdef	CONFIG_USB_SUSPEND +/* + * usb_disable_function_remotewakeup - disable usb3.0 + * device's function remote wakeup + * @udev: target device + * + * Assume there's only one function on the USB 3.0 + * device and disable remote wake for the first + * interface. FIXME if the interface association + * descriptor shows there's more than one function. + */ +static int usb_disable_function_remotewakeup(struct usb_device *udev) +{ +	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), +				USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, +				USB_INTRF_FUNC_SUSPEND,	0, NULL, 0, +				USB_CTRL_SET_TIMEOUT); +}  /*   * usb_port_suspend - suspend a usb device's upstream port @@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)  		dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",  				port1, status);  		/* paranoia:  "should not happen" */ -		if (udev->do_remote_wakeup) -			(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), -				USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, -				USB_DEVICE_REMOTE_WAKEUP, 0, -				NULL, 0, -				USB_CTRL_SET_TIMEOUT); +		if (udev->do_remote_wakeup) { +			if (!hub_is_superspeed(hub->hdev)) { +				(void) usb_control_msg(udev, +						usb_sndctrlpipe(udev, 0), +						USB_REQ_CLEAR_FEATURE, +						USB_RECIP_DEVICE, +						USB_DEVICE_REMOTE_WAKEUP, 0, +						NULL, 0, +						USB_CTRL_SET_TIMEOUT); +			} else +				(void) usb_disable_function_remotewakeup(udev); + +		}  		/* Try to enable USB2 hardware LPM again */  		if (udev->usb2_hw_lpm_capable == 1) @@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev)  	 * udev->reset_resume  	 */  	} else if (udev->actconfig && !udev->reset_resume) { -		le16_to_cpus(&devstatus); -		if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { -			status = usb_control_msg(udev, -					usb_sndctrlpipe(udev, 0), -					USB_REQ_CLEAR_FEATURE, +		if (!hub_is_superspeed(udev->parent)) { +			le16_to_cpus(&devstatus); +			if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) +				status = usb_control_msg(udev, +						usb_sndctrlpipe(udev, 0), +						USB_REQ_CLEAR_FEATURE,  						USB_RECIP_DEVICE, -					USB_DEVICE_REMOTE_WAKEUP, 0, -					NULL, 0, -					USB_CTRL_SET_TIMEOUT); -			if (status) -				
dev_dbg(&udev->dev, -					"disable remote wakeup, status %d\n", -					status); +						USB_DEVICE_REMOTE_WAKEUP, 0, +						NULL, 0, +						USB_CTRL_SET_TIMEOUT); +		} else { +			status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, +					&devstatus); +			le16_to_cpus(&devstatus); +			if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP +					| USB_INTRF_STAT_FUNC_RW)) +				status = +					usb_disable_function_remotewakeup(udev);  		} + +		if (status) +			dev_dbg(&udev->dev, +				"disable remote wakeup, status %d\n", +				status);  		status = 0;  	}  	return status; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2e43b332aae..2fdd767f8fe 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1605,6 +1605,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)  		if (epnum == 0 || epnum == 1) {  			dep->endpoint.maxpacket = 512; +			dep->endpoint.maxburst = 1;  			dep->endpoint.ops = &dwc3_gadget_ep0_ops;  			if (!epnum)  				dwc->gadget.ep0 = &dep->endpoint; diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 4a6961c517f..8c2f2512114 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1153,15 +1153,15 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)  					pr_err("%s: unmapped value: %lu\n", opts, value);  					return -EINVAL;  				} -			} -			else if (!memcmp(opts, "gid", 3)) +			} else if (!memcmp(opts, "gid", 3)) {  				data->perms.gid = make_kgid(current_user_ns(), value);  				if (!gid_valid(data->perms.gid)) {  					pr_err("%s: unmapped value: %lu\n", opts, value);  					return -EINVAL;  				} -			else +			} else {  				goto invalid; +			}  			break;  		default: diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c index 1b0f086426b..d3bd7b095ba 100644 --- a/drivers/usb/gadget/fsl_mxc_udc.c +++ b/drivers/usb/gadget/fsl_mxc_udc.c @@ -18,14 +18,13 @@  #include <linux/platform_device.h>  #include <linux/io.h> -#include <mach/hardware.h> -  static struct clk *mxc_ahb_clk;  static struct clk *mxc_per_clk;  static struct clk *mxc_ipg_clk;  /* workaround ENGcm09152 for i.MX35 */ -#define USBPHYCTRL_OTGBASE_OFFSET	0x608 +#define MX35_USBPHYCTRL_OFFSET		0x600 +#define USBPHYCTRL_OTGBASE_OFFSET	0x8  #define USBPHYCTRL_EVDO			(1 << 23)  int fsl_udc_clk_init(struct platform_device *pdev) @@ -59,7 +58,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)  	clk_prepare_enable(mxc_per_clk);  	/* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */ -	if (!cpu_is_mx51()) { +	if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {  		freq = clk_get_rate(mxc_per_clk);  		if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&  		    (freq < 59999000 || freq > 60001000)) { @@ -79,27 +78,40 @@ eclkrate:  	return ret;  } -void fsl_udc_clk_finalize(struct platform_device *pdev) +int fsl_udc_clk_finalize(struct platform_device *pdev)  {  	struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; -	if (cpu_is_mx35()) { -		unsigned int v; +	int ret = 0; -		/* workaround ENGcm09152 for i.MX35 */ -		if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) { -			v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + -					USBPHYCTRL_OTGBASE_OFFSET)); -			writel(v | USBPHYCTRL_EVDO, -				MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + -					USBPHYCTRL_OTGBASE_OFFSET)); +	/* workaround ENGcm09152 for i.MX35 */ +	if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) { +		unsigned int v; +		struct resource *res = platform_get_resource +			(pdev, IORESOURCE_MEM, 0); +		void __iomem *phy_regs = 
ioremap(res->start + +						MX35_USBPHYCTRL_OFFSET, 512); +		if (!phy_regs) { +			dev_err(&pdev->dev, "ioremap for phy address fails\n"); +			ret = -EINVAL; +			goto ioremap_err;  		} + +		v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET); +		writel(v | USBPHYCTRL_EVDO, +			phy_regs + USBPHYCTRL_OTGBASE_OFFSET); + +		iounmap(phy_regs);  	} + +ioremap_err:  	/* ULPI transceivers don't need usbpll */  	if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {  		clk_disable_unprepare(mxc_per_clk);  		mxc_per_clk = NULL;  	} + +	return ret;  }  void fsl_udc_clk_release(void) diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index c19f7f13790..667275cb7ba 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -41,6 +41,7 @@  #include <linux/fsl_devices.h>  #include <linux/dmapool.h>  #include <linux/delay.h> +#include <linux/of_device.h>  #include <asm/byteorder.h>  #include <asm/io.h> @@ -2438,11 +2439,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)  	unsigned int i;  	u32 dccparams; -	if (strcmp(pdev->name, driver_name)) { -		VDBG("Wrong device"); -		return -ENODEV; -	} -  	udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);  	if (udc_controller == NULL) {  		ERR("malloc udc failed\n"); @@ -2547,7 +2543,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)  		dr_controller_setup(udc_controller);  	} -	fsl_udc_clk_finalize(pdev); +	ret = fsl_udc_clk_finalize(pdev); +	if (ret) +		goto err_free_irq;  	/* Setup gadget structure */  	udc_controller->gadget.ops = &fsl_gadget_ops; @@ -2756,22 +2754,32 @@ static int fsl_udc_otg_resume(struct device *dev)  	return fsl_udc_resume(NULL);  } -  /*-------------------------------------------------------------------------  	Register entry point for the peripheral controller driver  --------------------------------------------------------------------------*/ - +static const struct platform_device_id fsl_udc_devtype[] = { +	{ +		.name = "imx-udc-mx27", +	}, { +		.name = "imx-udc-mx51", +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);  static struct platform_driver udc_driver = { -	.remove  = __exit_p(fsl_udc_remove), +	.remove		= __exit_p(fsl_udc_remove), +	/* Just for FSL i.mx SoC currently */ +	.id_table	= fsl_udc_devtype,  	/* these suspend and resume are not usb suspend and resume */ -	.suspend = fsl_udc_suspend, -	.resume  = fsl_udc_resume, -	.driver  = { -		.name = (char *)driver_name, -		.owner = THIS_MODULE, -		/* udc suspend/resume called from OTG driver */ -		.suspend = fsl_udc_otg_suspend, -		.resume  = fsl_udc_otg_resume, +	.suspend	= fsl_udc_suspend, +	.resume		= fsl_udc_resume, +	.driver		= { +			.name = (char *)driver_name, +			.owner = THIS_MODULE, +			/* udc suspend/resume called from OTG driver */ +			.suspend = fsl_udc_otg_suspend, +			.resume  = fsl_udc_otg_resume,  	},  }; diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h index f61a967f708..c6703bb07b2 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.h +++ b/drivers/usb/gadget/fsl_usb2_udc.h @@ -592,15 +592,16 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)  struct platform_device;  #ifdef CONFIG_ARCH_MXC  int fsl_udc_clk_init(struct platform_device *pdev); -void fsl_udc_clk_finalize(struct platform_device *pdev); +int fsl_udc_clk_finalize(struct platform_device *pdev);  void fsl_udc_clk_release(void);  #else  static inline int fsl_udc_clk_init(struct platform_device *pdev)  {  	return 0;  } -static inline void 
fsl_udc_clk_finalize(struct platform_device *pdev) +static inline int fsl_udc_clk_finalize(struct platform_device *pdev)  { +	return 0;  }  static inline void fsl_udc_clk_release(void)  { diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index d6bb128ce21..3a21c5d683c 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -148,7 +148,7 @@ config USB_EHCI_FSL  	  Variation of ARC USB block used in some Freescale chips.  config USB_EHCI_MXC -	bool "Support for Freescale i.MX on-chip EHCI USB controller" +	tristate "Support for Freescale i.MX on-chip EHCI USB controller"  	depends on USB_EHCI_HCD && ARCH_MXC  	select USB_EHCI_ROOT_HUB_TT  	---help--- diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 1eb4c3006e9..001fbff2fde 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_PCI)		+= pci-quirks.o  obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o  obj-$(CONFIG_USB_EHCI_PCI)	+= ehci-pci.o  obj-$(CONFIG_USB_EHCI_HCD_PLATFORM)	+= ehci-platform.o +obj-$(CONFIG_USB_EHCI_MXC)	+= ehci-mxc.o  obj-$(CONFIG_USB_OXU210HP_HCD)	+= oxu210hp-hcd.o  obj-$(CONFIG_USB_ISP116X_HCD)	+= isp116x-hcd.o diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index c97503bb0b0..b416a3fc995 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -74,10 +74,6 @@ static const char	hcd_name [] = "ehci_hcd";  #undef VERBOSE_DEBUG  #undef EHCI_URB_TRACE -#ifdef DEBUG -#define EHCI_STATS -#endif -  /* magic numbers that can affect system performance */  #define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */  #define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */ @@ -801,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)  			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);  			set_bit(i, &ehci->resuming_ports);  			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); +			usb_hcd_start_port_resume(&hcd->self, i);  			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);  		}  	} @@ -1250,11 +1247,6 @@ MODULE_LICENSE ("GPL");  #define	PLATFORM_DRIVER		ehci_fsl_driver  #endif -#ifdef CONFIG_USB_EHCI_MXC -#include "ehci-mxc.c" -#define PLATFORM_DRIVER		ehci_mxc_driver -#endif -  #ifdef CONFIG_USB_EHCI_SH  #include "ehci-sh.c"  #define PLATFORM_DRIVER		ehci_hcd_sh_driver @@ -1352,7 +1344,8 @@ MODULE_LICENSE ("GPL");  #if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \  	!IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ -	!defined(CONFIG_USB_CHIPIDEA_HOST) && \ +	!IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \ +	!IS_ENABLED(CONFIG_USB_EHCI_MXC) && \  	!defined(PLATFORM_DRIVER) && \  	!defined(PS3_SYSTEM_BUS_DRIVER) && \  	!defined(OF_PLATFORM_DRIVER) && \ diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4ccb97c0678..4d3b294f203 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)  			status = STS_PCD;  		}  	} -	/* FIXME autosuspend idle root hubs */ + +	/* If a resume is in progress, make sure it can finish */ +	if (ehci->resuming_ports) +		mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); +  	spin_unlock_irqrestore (&ehci->lock, flags);  	return status ? 
retval : 0;  } @@ -851,6 +855,7 @@ static int ehci_hub_control (  				/* resume signaling for 20 msec */  				ehci->reset_done[wIndex] = jiffies  						+ msecs_to_jiffies(20); +				usb_hcd_start_port_resume(&hcd->self, wIndex);  				/* check the port again */  				mod_timer(&ehci_to_hcd(ehci)->rh_timer,  						ehci->reset_done[wIndex]); @@ -862,6 +867,7 @@ static int ehci_hub_control (  				clear_bit(wIndex, &ehci->suspended_ports);  				set_bit(wIndex, &ehci->port_c_suspend);  				ehci->reset_done[wIndex] = 0; +				usb_hcd_end_port_resume(&hcd->self, wIndex);  				/* stop resume signaling */  				temp = ehci_readl(ehci, status_reg); @@ -950,6 +956,7 @@ static int ehci_hub_control (  			ehci->reset_done[wIndex] = 0;  			if (temp & PORT_PE)  				set_bit(wIndex, &ehci->port_c_suspend); +			usb_hcd_end_port_resume(&hcd->self, wIndex);  		}  		if (temp & PORT_OC) diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index ec7f5d2c90d..dedb80bb8d4 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -17,75 +17,38 @@   * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.   */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/io.h>  #include <linux/platform_device.h>  #include <linux/clk.h>  #include <linux/delay.h>  #include <linux/usb/otg.h>  #include <linux/usb/ulpi.h>  #include <linux/slab.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h>  #include <linux/platform_data/usb-ehci-mxc.h>  #include <asm/mach-types.h> +#include "ehci.h" + +#define DRIVER_DESC "Freescale On-Chip EHCI Host driver" + +static const char hcd_name[] = "ehci-mxc"; +  #define ULPI_VIEWPORT_OFFSET	0x170  struct ehci_mxc_priv {  	struct clk *usbclk, *ahbclk, *phyclk; -	struct usb_hcd *hcd;  }; -/* called during probe() after chip reset completes */ -static int ehci_mxc_setup(struct usb_hcd *hcd) -{ -	hcd->has_tt = 1; - -	return ehci_setup(hcd); -} - -static const struct hc_driver ehci_mxc_hc_driver = { -	.description = hcd_name, -	.product_desc = "Freescale On-Chip EHCI Host Controller", -	.hcd_priv_size = sizeof(struct ehci_hcd), - -	/* -	 * generic hardware linkage -	 */ -	.irq = ehci_irq, -	.flags = HCD_USB2 | HCD_MEMORY, +static struct hc_driver __read_mostly ehci_mxc_hc_driver; -	/* -	 * basic lifecycle operations -	 */ -	.reset = ehci_mxc_setup, -	.start = ehci_run, -	.stop = ehci_stop, -	.shutdown = ehci_shutdown, - -	/* -	 * managing i/o requests and associated device resources -	 */ -	.urb_enqueue = ehci_urb_enqueue, -	.urb_dequeue = ehci_urb_dequeue, -	.endpoint_disable = ehci_endpoint_disable, -	.endpoint_reset = ehci_endpoint_reset, - -	/* -	 * scheduling support -	 */ -	.get_frame_number = ehci_get_frame, - -	/* -	 * root hub support -	 */ -	.hub_status_data = ehci_hub_status_data, -	.hub_control = ehci_hub_control, -	.bus_suspend = ehci_bus_suspend, -	.bus_resume = ehci_bus_resume, -	.relinquish_port = ehci_relinquish_port, -	.port_handed_over = ehci_port_handed_over, - -	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, +static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = { +	.extra_priv_size =	sizeof(struct ehci_mxc_priv),  };  static int ehci_mxc_drv_probe(struct platform_device *pdev) @@ -112,12 +75,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)  	if (!hcd)  		return -ENOMEM; -	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); -	if (!priv) { -		ret = -ENOMEM; -		goto err_alloc; -	} -  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!res) {  		dev_err(dev, "Found HC with no 
register addr. Check setup!\n"); @@ -135,6 +92,10 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)  		goto err_alloc;  	} +	hcd->has_tt = 1; +	ehci = hcd_to_ehci(hcd); +	priv = (struct ehci_mxc_priv *) ehci->priv; +  	/* enable clocks */  	priv->usbclk = devm_clk_get(&pdev->dev, "ipg");  	if (IS_ERR(priv->usbclk)) { @@ -169,8 +130,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)  		mdelay(10);  	} -	ehci = hcd_to_ehci(hcd); -  	/* EHCI registers start at offset 0x100 */  	ehci->caps = hcd->regs + 0x100;  	ehci->regs = hcd->regs + 0x100 + @@ -198,8 +157,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)  		}  	} -	priv->hcd = hcd; -	platform_set_drvdata(pdev, priv); +	platform_set_drvdata(pdev, hcd);  	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);  	if (ret) @@ -244,8 +202,11 @@ err_alloc:  static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)  {  	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; -	struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); -	struct usb_hcd *hcd = priv->hcd; +	struct usb_hcd *hcd = platform_get_drvdata(pdev); +	struct ehci_hcd *ehci = hcd_to_ehci(hcd); +	struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv; + +	usb_remove_hcd(hcd);  	if (pdata && pdata->exit)  		pdata->exit(pdev); @@ -253,23 +214,20 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)  	if (pdata->otg)  		usb_phy_shutdown(pdata->otg); -	usb_remove_hcd(hcd); -	usb_put_hcd(hcd); -	platform_set_drvdata(pdev, NULL); -  	clk_disable_unprepare(priv->usbclk);  	clk_disable_unprepare(priv->ahbclk);  	if (priv->phyclk)  		clk_disable_unprepare(priv->phyclk); +	usb_put_hcd(hcd); +	platform_set_drvdata(pdev, NULL);  	return 0;  }  static void ehci_mxc_drv_shutdown(struct platform_device *pdev)  { -	struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); -	struct usb_hcd *hcd = priv->hcd; +	struct usb_hcd *hcd = platform_get_drvdata(pdev);  	if (hcd->driver->shutdown)  		hcd->driver->shutdown(hcd); @@ -279,9 +237,31 @@ MODULE_ALIAS("platform:mxc-ehci");  static struct platform_driver ehci_mxc_driver = {  	.probe = ehci_mxc_drv_probe, -	.remove = __exit_p(ehci_mxc_drv_remove), +	.remove = ehci_mxc_drv_remove,  	.shutdown = ehci_mxc_drv_shutdown,  	.driver = {  		   .name = "mxc-ehci",  	},  }; + +static int __init ehci_mxc_init(void) +{ +	if (usb_disabled()) +		return -ENODEV; + +	pr_info("%s: " DRIVER_DESC "\n", hcd_name); + +	ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides); +	return platform_driver_register(&ehci_mxc_driver); +} +module_init(ehci_mxc_init); + +static void __exit ehci_mxc_cleanup(void) +{ +	platform_driver_unregister(&ehci_mxc_driver); +} +module_exit(ehci_mxc_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Sascha Hauer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3d989028c83..fd252f0cfb3 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)  	if (ehci->async_iaa || ehci->async_unlinking)  		return; -	/* Do all the waiting QHs at once */ -	ehci->async_iaa = ehci->async_unlink; -	ehci->async_unlink = NULL; -  	/* If the controller isn't running, we don't have to wait for it */  	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { + +		/* Do all the waiting QHs */ +		ehci->async_iaa = ehci->async_unlink; +		ehci->async_unlink = NULL; +  		if (!nested)		/* Avoid recursion */  			end_unlink_async(ehci);  	/* 
Otherwise start a new IAA cycle */  	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { +		struct ehci_qh		*qh; + +		/* Do only the first waiting QH (nVidia bug?) */ +		qh = ehci->async_unlink; +		ehci->async_iaa = qh; +		ehci->async_unlink = qh->unlink_next; +		qh->unlink_next = NULL; +  		/* Make sure the unlinks are all visible to the hardware */  		wmb(); @@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)  	}  } +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); +  static void unlink_empty_async(struct ehci_hcd *ehci)  { -	struct ehci_qh		*qh, *next; -	bool			stopped = (ehci->rh_state < EHCI_RH_RUNNING); +	struct ehci_qh		*qh; +	struct ehci_qh		*qh_to_unlink = NULL;  	bool			check_unlinks_later = false; +	int			count = 0; -	/* Unlink all the async QHs that have been empty for a timer cycle */ -	next = ehci->async->qh_next.qh; -	while (next) { -		qh = next; -		next = qh->qh_next.qh; - +	/* Find the last async QH which has been empty for a timer cycle */ +	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {  		if (list_empty(&qh->qtd_list) &&  				qh->qh_state == QH_STATE_LINKED) { -			if (!stopped && qh->unlink_cycle == -					ehci->async_unlink_cycle) +			++count; +			if (qh->unlink_cycle == ehci->async_unlink_cycle)  				check_unlinks_later = true;  			else -				single_unlink_async(ehci, qh); +				qh_to_unlink = qh;  		}  	} -	/* Start a new IAA cycle if any QHs are waiting for it */ -	if (ehci->async_unlink) -		start_iaa_cycle(ehci, false); +	/* If nothing else is being unlinked, unlink the last empty QH */ +	if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { +		start_unlink_async(ehci, qh_to_unlink); +		--count; +	} -	/* QHs that haven't been empty for long enough will be handled later */ -	if (check_unlinks_later) { +	/* Other QHs will be handled later */ +	if (count > 0) {  		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);  		++ehci->async_unlink_cycle;  	} diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 69ebee73c0c..b476daf49f6 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)  }  static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };  /* carryover low/fullspeed bandwidth that crosses uframe boundries */  static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) @@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)  	}  	ehci->now_frame = now_frame; +	frame = ehci->last_iso_frame;  	for (;;) {  		union ehci_shadow	q, *q_p;  		__hc32			type, *hw_p; -		frame = ehci->last_iso_frame;  restart:  		/* scan each element in frame's queue for completions */  		q_p = &ehci->pshadow [frame]; @@ -2321,6 +2321,9 @@ restart:  		/* Stop when we have reached the current frame */  		if (frame == now_frame)  			break; -		ehci->last_iso_frame = (frame + 1) & fmask; + +		/* The last frame may still have active siTDs */ +		ehci->last_iso_frame = frame; +		frame = (frame + 1) & fmask;  	}  } diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0..f904071d70d 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)  	if (want != actual) { -		/* Poll again later, but give up after about 20 ms */ -		if 
(ehci->ASS_poll_count++ < 20) { -			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); -			return; -		} -		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", -				want, actual); +		/* Poll again later */ +		ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); +		++ehci->ASS_poll_count; +		return;  	} + +	if (ehci->ASS_poll_count > 20) +		ehci_dbg(ehci, "ASS poll count reached %d\n", +				ehci->ASS_poll_count);  	ehci->ASS_poll_count = 0;  	/* The status is up-to-date; restart or stop the schedule as needed */ @@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)  	if (want != actual) { -		/* Poll again later, but give up after about 20 ms */ -		if (ehci->PSS_poll_count++ < 20) { -			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); -			return; -		} -		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", -				want, actual); +		/* Poll again later */ +		ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); +		return;  	} + +	if (ehci->PSS_poll_count > 20) +		ehci_dbg(ehci, "PSS poll count reached %d\n", +				ehci->PSS_poll_count);  	ehci->PSS_poll_count = 0;  	/* The status is up-to-date; restart or stop the schedule as needed */ diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 9dadc7118d6..36c3a821059 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -38,6 +38,10 @@ typedef __u16 __bitwise __hc16;  #endif  /* statistics can be kept for tuning/monitoring */ +#ifdef DEBUG +#define EHCI_STATS +#endif +  struct ehci_stats {  	/* irq usage */  	unsigned long		normal; @@ -221,6 +225,9 @@ struct ehci_hcd {			/* one per controller */  #ifdef DEBUG  	struct dentry		*debug_dir;  #endif + +	/* platform-specific data -- must come last */ +	unsigned long		priv[0] __aligned(sizeof(s64));  };  /* convert between an HCD pointer and the corresponding EHCI_HCD */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a3b6d7104ae..4c338ec03a0 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)  				"defaulting to EHCI.\n");  		dev_warn(&xhci_pdev->dev,  				"USB 3.0 devices will work at USB 2.0 speeds.\n"); +		usb_disable_xhci_ports(xhci_pdev);  		return;  	} diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 4b9e9aba266..4f64d24eebc 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)  		return IRQ_NONE;  	uhci_writew(uhci, status, USBSTS);		/* Clear it */ +	spin_lock(&uhci->lock); +	if (unlikely(!uhci->is_initialized))	/* not yet configured */ +		goto done; +  	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {  		if (status & USBSTS_HSE)  			dev_err(uhci_dev(uhci), "host system error, " @@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)  			dev_err(uhci_dev(uhci), "host controller process "  					"error, something bad happened!\n");  		if (status & USBSTS_HCH) { -			spin_lock(&uhci->lock);  			if (uhci->rh_state >= UHCI_RH_RUNNING) {  				dev_err(uhci_dev(uhci),  					"host controller halted, " @@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)  				 * pending unlinks */  				mod_timer(&hcd->rh_timer, jiffies);  			} -			spin_unlock(&uhci->lock);  		}  	} -	if (status & USBSTS_RD) +	if (status & USBSTS_RD) { +		spin_unlock(&uhci->lock);  		usb_hcd_poll_rh_status(hcd); -	else { -		
spin_lock(&uhci->lock); +	} else {  		uhci_scan_schedule(uhci); + done:  		spin_unlock(&uhci->lock);  	} @@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd)  	 */  	mb(); +	spin_lock_irq(&uhci->lock);  	configure_hc(uhci);  	uhci->is_initialized = 1; -	spin_lock_irq(&uhci->lock);  	start_rh(uhci);  	spin_unlock_irq(&uhci->lock);  	return 0; diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 768d54295a2..15d13229ddb 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,  		}  	}  	clear_bit(port, &uhci->resuming_ports); +	usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);  }  /* Wait for the UHCI controller in HP's iLO2 server management chip. @@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)  				set_bit(port, &uhci->resuming_ports);  				uhci->ports_timeout = jiffies +  						msecs_to_jiffies(25); +				usb_hcd_start_port_resume( +						&uhci_to_hcd(uhci)->self, port);  				/* Make sure we see the port again  				 * after the resuming period is over. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 59fb5c677db..7f76a49e90d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,  				faked_port_index + 1);  		if (slot_id && xhci->devs[slot_id])  			xhci_ring_device(xhci, slot_id); -		if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { +		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {  			bus_state->port_remote_wakeup &=  				~(1 << faked_port_index);  			xhci_test_and_clear_bit(xhci, port_array, @@ -2589,6 +2589,8 @@ cleanup:  				(trb_comp_code != COMP_STALL &&  					trb_comp_code != COMP_BABBLE))  				xhci_urb_free_priv(xhci, urb_priv); +			else +				kfree(urb_priv);  			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);  			if ((urb->actual_length != urb->transfer_buffer_length && @@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,  	 * running_total.  	 */  	packets_transferred = (running_total + trb_buff_len) / -		usb_endpoint_maxp(&urb->ep->desc); +		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));  	if ((total_packet_count - packets_transferred) > 31)  		return 31 << 17; @@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		td_len = urb->iso_frame_desc[i].length;  		td_remain_len = td_len;  		total_packet_count = DIV_ROUND_UP(td_len, -				usb_endpoint_maxp(&urb->ep->desc)); +				GET_MAX_PACKET( +					usb_endpoint_maxp(&urb->ep->desc)));  		/* A zero-length transfer still involves at least one packet. 
*/  		if (total_packet_count == 0)  			total_packet_count++; @@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,  		td = urb_priv->td[i];  		for (j = 0; j < trbs_per_td; j++) {  			u32 remainder = 0; -			field = TRB_TBC(burst_count) | TRB_TLBPC(residue); +			field = 0;  			if (first_trb) { +				field = TRB_TBC(burst_count) | +					TRB_TLBPC(residue);  				/* Queue the isoc TRB */  				field |= TRB_TYPE(TRB_ISOC);  				/* Assume URB_ISO_ASAP is set */ diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 0968dd7a859..f522000e8f0 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -105,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)  	musb_writel(&tx->tx_complete, 0, ptr);  } -static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) +static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)  {  	int	j; @@ -150,7 +150,7 @@ static void cppi_pool_free(struct cppi_channel *c)  	c->last_processed = NULL;  } -static int __init cppi_controller_start(struct dma_controller *c) +static int cppi_controller_start(struct dma_controller *c)  {  	struct cppi	*controller;  	void __iomem	*tibase; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f14736f647f..edc0f0dcad8 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {  	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */  	{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */  	{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ +	{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */  	{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */  	{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */  	{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ba68835d06a..90ceef1776c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {  	/*  	 * ELV devices:  	 */ +	{ USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },  	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, +	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },  	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },  	{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index fa5d5603827..9d359e189a6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -147,6 +147,11 @@  #define XSENS_CONVERTER_6_PID	0xD38E  #define XSENS_CONVERTER_7_PID	0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509			0xD491	/* Omni1509 embedded USB-serial */ +  /*   * NDI (www.ndigital.com) product ids   */ @@ -204,7 +209,7 @@  /*   * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403).   
* Further IDs taken from ELV Windows .inf file.   *   * The previously included PID for the UO 100 module was incorrect. @@ -212,6 +217,8 @@   *   * Armin Laeuger originally sent the PID for the UM 100 module.   */ +#define FTDI_ELV_VID	0x1B1F	/* ELV AG */ +#define FTDI_ELV_WS300_PID	0xC006	/* eQ3 WS 300 PC II */  #define FTDI_ELV_USR_PID	0xE000	/* ELV Universal-Sound-Recorder */  #define FTDI_ELV_MSM1_PID	0xE001	/* ELV Mini-Sound-Modul */  #define FTDI_ELV_KL100_PID	0xE002	/* ELV Kfz-Leistungsmesser KL 100 */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 58184f3de68..82afc4d6a32 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,  	wait_queue_t wait;  	unsigned long flags; +	if (!tty) +		return; +  	if (!timeout)  		timeout = (HZ * EDGE_CLOSING_WAIT)/100; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 478adcfcdf2..567bc77d639 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);  #define TELIT_PRODUCT_CC864_DUAL		0x1005  #define TELIT_PRODUCT_CC864_SINGLE		0x1006  #define TELIT_PRODUCT_DE910_DUAL		0x1010 +#define TELIT_PRODUCT_LE920			0x1200  /* ZTE PRODUCTS */  #define ZTE_VENDOR_ID				0x19d2 @@ -449,6 +450,14 @@ static void option_instat_callback(struct urb *urb);  #define PETATEL_VENDOR_ID			0x1ff4  #define PETATEL_PRODUCT_NP10T			0x600e +/* TP-LINK Incorporated products */ +#define TPLINK_VENDOR_ID			0x2357 +#define TPLINK_PRODUCT_MA180			0x0201 + +/* Changhong products */ +#define CHANGHONG_VENDOR_ID			0x2077 +#define CHANGHONG_PRODUCT_CH690			0x7001 +  /* some devices interfaces need special handling due to a number of reasons */  enum option_blacklist_reason {  		OPTION_BLACKLIST_NONE = 0, @@ -530,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {  	.reserved = BIT(3) | BIT(4),  }; +static const struct option_blacklist_info telit_le920_blacklist = { +	.sendsetup = BIT(0), +	.reserved = BIT(1) | BIT(5), +}; +  static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },  	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -780,6 +794,8 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },  	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, +	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),  		.driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -930,7 +946,8 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */  	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, -	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ +	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */  	 
 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, @@ -1311,6 +1328,9 @@ static const struct usb_device_id option_ids[] = {  	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },  	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },  	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, +	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), +	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },  	{ } /* Terminating entry */  };  MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index aa148c21ea4..24662547dc5 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {  	{DEVICE_G1K(0x05c6, 0x9221)},	/* Generic Gobi QDL device */  	{DEVICE_G1K(0x05c6, 0x9231)},	/* Generic Gobi QDL device */  	{DEVICE_G1K(0x1f45, 0x0001)},	/* Unknown Gobi QDL device */ +	{DEVICE_G1K(0x1bc7, 0x900e)},	/* Telit Gobi QDL device */  	/* Gobi 2000 devices */  	{USB_DEVICE(0x1410, 0xa010)},	/* Novatel Gobi 2000 QDL device */ diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d900150c..16b0bf055ee 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)  	return 0;  } -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us) +/* This places the HUAWEI usb dongles in multi-port mode */ +static int usb_stor_huawei_feature_init(struct us_data *us)  {  	int result; @@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)  	US_DEBUGP("Huawei mode set result is %d\n", result);  	return 0;  } + +/* + * It will send a scsi switch command called rewind' to huawei dongle. + * When the dongle receives this command at the first time, + * it will reboot immediately. After rebooted, it will ignore this command. + * So it is  unnecessary to read its response. + */ +static int usb_stor_huawei_scsi_init(struct us_data *us) +{ +	int result = 0; +	int act_len = 0; +	struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; +	char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, +			0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +	bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); +	bcbw->Tag = 0; +	bcbw->DataTransferLength = 0; +	bcbw->Flags = bcbw->Lun = 0; +	bcbw->Length = sizeof(rewind_cmd); +	memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); +	memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); + +	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, +					US_BULK_CB_WRAP_LEN, &act_len); +	US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); +	return result; +} + +/* + * It tries to find the supported Huawei USB dongles. + * In Huawei, they assign the following product IDs + * for all of their mobile broadband dongles, + * including the new dongles in the future. + * So if the product ID is not included in this list, + * it means it is not Huawei's mobile broadband dongles. 
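A minimal stand-alone sketch of the product-ID dispatch that this comment and the usb_stor_huawei_dongles_pid()/usb_stor_huawei_init() hunks below implement. The PID ranges and the 0x1446 cutoff are copied from those hunks; the helper name and the test harness are invented purely for illustration and are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the ranges checked by
 * usb_stor_huawei_dongles_pid() in the hunk below. */
static bool huawei_storage_pid(unsigned int pid)
{
	return pid == 0x1001 || pid == 0x1003 || pid == 0x1004 ||
	       (pid >= 0x1401 && pid <= 0x1500) ||
	       (pid >= 0x1505 && pid <= 0x1600) ||
	       (pid >= 0x1c02 && pid <= 0x2202);
}

int main(void)
{
	unsigned int pid = 0x1446;

	if (huawei_storage_pid(pid))
		printf("PID 0x%04x: %s mode switch\n", pid,
		       pid >= 0x1446 ? "SCSI 'rewind'" : "vendor feature request");
	else
		printf("PID 0x%04x: not switched\n", pid);
	return 0;
}

The split at 0x1446 matches usb_stor_huawei_init() below: newer dongles get the SCSI 'rewind' command, older ones keep the original feature request, and either is sent only when interface 0 (the single-port CD-ROM mode) is presented.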
+ */ +static int usb_stor_huawei_dongles_pid(struct us_data *us) +{ +	struct usb_interface_descriptor *idesc; +	int idProduct; + +	idesc = &us->pusb_intf->cur_altsetting->desc; +	idProduct = us->pusb_dev->descriptor.idProduct; +	/* The first port is CDROM, +	 * means the dongle in the single port mode, +	 * and a switch command is required to be sent. */ +	if (idesc && idesc->bInterfaceNumber == 0) { +		if ((idProduct == 0x1001) +			|| (idProduct == 0x1003) +			|| (idProduct == 0x1004) +			|| (idProduct >= 0x1401 && idProduct <= 0x1500) +			|| (idProduct >= 0x1505 && idProduct <= 0x1600) +			|| (idProduct >= 0x1c02 && idProduct <= 0x2202)) { +			return 1; +		} +	} +	return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ +	int result = 0; + +	if (usb_stor_huawei_dongles_pid(us)) { +		if (us->pusb_dev->descriptor.idProduct >= 0x1446) +			result = usb_stor_huawei_scsi_init(us); +		else +			result = usb_stor_huawei_feature_init(us); +	} +	return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fbb06..5376d4fc76f 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);   * flash reader */  int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d305a5aa3a5..72923b56bbf 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1527,335 +1527,10 @@ UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,  /* Reported by fangxiaozhi <huananhu@huawei.com>   * This brings the HUAWEI data card devices into multi-port mode   */ -UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,  		"HUAWEI MOBILE",  		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 
0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		
"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, 
USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, -		0), -UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000, -		"HUAWEI MOBILE", -		"Mass Storage", -		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, +		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,  		0),  /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 31b3e1a61bb..cf09b6ba71f 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");  	.useTransport = use_transport,	\  } +#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ +		vendor_name, product_name, use_protocol, use_transport, \ +		init_function, Flags) \ +{ \ +	.vendorName = vendor_name,	\ +	.productName = product_name,	\ +	.useProtocol = use_protocol,	\ +	.useTransport = use_transport,	\ +	.initFunction = init_function,	\ +} +  static struct us_unusual_dev us_unusual_dev_list[] = {  #	include "unusual_devs.h"  	{ }		/* Terminating entry */ @@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =  #undef UNUSUAL_DEV  #undef COMPLIANT_DEV  #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF  #ifdef CONFIG_LOCKDEP diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b78a526910f..5ef8ce74aae 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -41,6 +41,20 @@  #define USUAL_DEV(useProto, useTrans) \  { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } +/* Define the device is matched with Vendor ID and interface descriptors */ +#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ +			vendorName, productName, useProtocol, useTransport, \ +			initFunction, flags) \ +{ \ +	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ +				| USB_DEVICE_ID_MATCH_VENDOR, \ +	.idVendor    = (id_vendor), \ +	.bInterfaceClass = (cl), \ +	.bInterfaceSubClass = (sc), \ +	.bInterfaceProtocol = (pr), \ +	.driver_info = (flags) \ +} +  struct usb_device_id usb_storage_usb_ids[] = {  #	include "unusual_devs.h"  	{ }		/* Terminating entry */ @@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);  #undef UNUSUAL_DEV  #undef COMPLIANT_DEV  #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF  /*   * The table of devices to 
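The reason UNUSUAL_VENDOR_INTF is defined and then #undef-ed in both usb.c and usual-tables.c is that unusual_devs.h is included twice with different macro definitions, so a single entry expands into an us_unusual_dev record in the first file and into a usb_device_id match in the second. A tiny stand-alone illustration of that include-twice ("X macro") pattern; the entry list and struct names here are invented and are not the real storage-driver types.

#include <stdio.h>

/* DEVICE_LIST stands in for unusual_devs.h: one ENTRY() per device,
 * inlined here so the example is self-contained. */
#define DEVICE_LIST \
	ENTRY(0x12d1, "HUAWEI MOBILE") \
	ENTRY(0x0403, "FTDI")

/* First expansion: a table of names (the role usb.c plays). */
#define ENTRY(vid, name) { name },
static const struct { const char *name; } names[] = { DEVICE_LIST };
#undef ENTRY

/* Second expansion: a table of IDs (the role usual-tables.c plays). */
#define ENTRY(vid, name) { vid },
static const struct { unsigned short vid; } ids[] = { DEVICE_LIST };
#undef ENTRY

int main(void)
{
	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%04x %s\n", ids[i].vid, names[i].name);
	return 0;
}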
ignore diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 4362d9e7baa..f72323ef618 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,  			filled = 1;  		} else {  			/* Drop writes, fill reads with FF */ +			filled = min((size_t)(x_end - pos), count);  			if (!iswrite) {  				char val = 0xFF;  				size_t i; -				for (i = 0; i < x_end - pos; i++) { +				for (i = 0; i < filled; i++) {  					if (put_user(val, buf + i))  						goto out;  				}  			} -			filled = x_end - pos;  		}  		count -= filled; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ebd08b21b23..959b1cd89e6 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)  }  /* Caller must have TX VQ lock */ -static void tx_poll_start(struct vhost_net *net, struct socket *sock) +static int tx_poll_start(struct vhost_net *net, struct socket *sock)  { +	int ret; +  	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) -		return; -	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); -	net->tx_poll_state = VHOST_NET_POLL_STARTED; +		return 0; +	ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); +	if (!ret) +		net->tx_poll_state = VHOST_NET_POLL_STARTED; +	return ret;  }  /* In case of DMA done not in order in lower device driver for some reason. @@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,  		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);  } -static void vhost_net_enable_vq(struct vhost_net *n, +static int vhost_net_enable_vq(struct vhost_net *n,  				struct vhost_virtqueue *vq)  {  	struct socket *sock; +	int ret;  	sock = rcu_dereference_protected(vq->private_data,  					 lockdep_is_held(&vq->mutex));  	if (!sock) -		return; +		return 0;  	if (vq == n->vqs + VHOST_NET_VQ_TX) {  		n->tx_poll_state = VHOST_NET_POLL_STOPPED; -		tx_poll_start(n, sock); +		ret = tx_poll_start(n, sock);  	} else -		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); +		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); + +	return ret;  }  static struct socket *vhost_net_stop_vq(struct vhost_net *n, @@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)  			r = PTR_ERR(ubufs);  			goto err_ubufs;  		} -		oldubufs = vq->ubufs; -		vq->ubufs = ubufs; +  		vhost_net_disable_vq(n, vq);  		rcu_assign_pointer(vq->private_data, sock); -		vhost_net_enable_vq(n, vq); -  		r = vhost_init_used(vq);  		if (r) -			goto err_vq; +			goto err_used; +		r = vhost_net_enable_vq(n, vq); +		if (r) +			goto err_used; + +		oldubufs = vq->ubufs; +		vq->ubufs = ubufs;  		n->tx_packets = 0;  		n->tx_zcopy_err = 0; @@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)  	mutex_unlock(&n->dev.mutex);  	return 0; +err_used: +	rcu_assign_pointer(vq->private_data, oldsock); +	vhost_net_enable_vq(n, vq); +	if (ubufs) +		vhost_ubuf_put_and_wait(ubufs);  err_ubufs:  	fput(sock->file);  err_vq: diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index b20df5c829f..22321cf84fb 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)  	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */  	tv_tpg = vs->vs_tpg; -	if (unlikely(!tv_tpg)) { -		pr_err("%s endpoint not set\n", __func__); +	if 
(unlikely(!tv_tpg))  		return; -	}  	mutex_lock(&vq->mutex);  	vhost_disable_notify(&vs->dev, vq); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 34389f75fe6..9759249e6d9 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,  	init_poll_funcptr(&poll->table, vhost_poll_func);  	poll->mask = mask;  	poll->dev = dev; +	poll->wqh = NULL;  	vhost_work_init(&poll->work, fn);  }  /* Start polling a file. We add ourselves to file's wait queue. The caller must   * keep a reference to a file until after vhost_poll_stop is called. */ -void vhost_poll_start(struct vhost_poll *poll, struct file *file) +int vhost_poll_start(struct vhost_poll *poll, struct file *file)  {  	unsigned long mask; +	int ret = 0;  	mask = file->f_op->poll(file, &poll->table);  	if (mask)  		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); +	if (mask & POLLERR) { +		if (poll->wqh) +			remove_wait_queue(poll->wqh, &poll->wait); +		ret = -EINVAL; +	} + +	return ret;  }  /* Stop polling a file. After this function returns, it becomes safe to drop the   * file reference. You must also flush afterwards. */  void vhost_poll_stop(struct vhost_poll *poll)  { -	remove_wait_queue(poll->wqh, &poll->wait); +	if (poll->wqh) { +		remove_wait_queue(poll->wqh, &poll->wait); +		poll->wqh = NULL; +	}  }  static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, @@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)  		fput(filep);  	if (pollstart && vq->handle_kick) -		vhost_poll_start(&vq->poll, vq->kick); +		r = vhost_poll_start(&vq->poll, vq->kick);  	mutex_unlock(&vq->mutex); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 2639c58b23a..17261e277c0 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);  void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,  		     unsigned long mask, struct vhost_dev *dev); -void vhost_poll_start(struct vhost_poll *poll, struct file *file); +int vhost_poll_start(struct vhost_poll *poll, struct file *file);  void vhost_poll_stop(struct vhost_poll *poll);  void vhost_poll_flush(struct vhost_poll *poll);  void vhost_poll_queue(struct vhost_poll *poll); diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 12526787a7c..0abf2bf2083 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -139,6 +139,7 @@ struct imxfb_info {  	struct clk		*clk_ahb;  	struct clk		*clk_per;  	enum imxfb_type		devtype; +	bool			enabled;  	/*  	 * These are the addresses we mapped @@ -536,6 +537,10 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)  static void imxfb_enable_controller(struct imxfb_info *fbi)  { + +	if (fbi->enabled) +		return; +  	pr_debug("Enabling LCD controller\n");  	writel(fbi->screen_dma, fbi->regs + LCDC_SSA); @@ -556,6 +561,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)  	clk_prepare_enable(fbi->clk_ipg);  	clk_prepare_enable(fbi->clk_ahb);  	clk_prepare_enable(fbi->clk_per); +	fbi->enabled = true;  	if (fbi->backlight_power)  		fbi->backlight_power(1); @@ -565,6 +571,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)  static void imxfb_disable_controller(struct imxfb_info *fbi)  { +	if (!fbi->enabled) +		return; +  	pr_debug("Disabling LCD controller\n");  	if (fbi->backlight_power) @@ -575,6 +584,7 @@ static void imxfb_disable_controller(struct 
imxfb_info *fbi)  	clk_disable_unprepare(fbi->clk_per);  	clk_disable_unprepare(fbi->clk_ipg);  	clk_disable_unprepare(fbi->clk_ahb); +	fbi->enabled = false;  	writel(0, fbi->regs + LCDC_RMCR);  } @@ -729,6 +739,8 @@ static int __init imxfb_init_fbinfo(struct platform_device *pdev)  	memset(fbi, 0, sizeof(struct imxfb_info)); +	fbi->devtype = pdev->id_entry->driver_data; +  	strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));  	info->fix.type			= FB_TYPE_PACKED_PIXELS; @@ -789,7 +801,6 @@ static int __init imxfb_probe(struct platform_device *pdev)  		return -ENOMEM;  	fbi = info->par; -	fbi->devtype = pdev->id_entry->driver_data;  	if (!fb_mode)  		fb_mode = pdata->mode[0].mode.name; diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index 18688c12e30..d7d66ef5cb5 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -538,6 +538,7 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {  	FEAT_ALPHA_FIXED_ZORDER,  	FEAT_FIFO_MERGE,  	FEAT_OMAP3_DSI_FIFO_BUG, +	FEAT_DPI_USES_VDDS_DSI,  };  static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = { diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 4dcfced107f..084041d42c9 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)  static int vcpu_online(unsigned int cpu)  {  	int err; -	char dir[32], state[32]; +	char dir[16], state[16];  	sprintf(dir, "cpu/%u", cpu); -	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); +	err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);  	if (err != 1) {  		if (!xen_initial_domain())  			printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0be4df39e95..74d77dfa5f6 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)  	if (irq == -1) {  		irq = xen_allocate_irq_dynamic(); -		if (irq == -1) +		if (irq < 0)  			goto out;  		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, @@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)  	if (irq == -1) {  		irq = xen_allocate_irq_dynamic(); -		if (irq == -1) +		if (irq < 0)  			goto out;  		irq_set_chip_and_handler_name(irq, &xen_percpu_chip, diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 2e22df2f7a3..3c8803feba2 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "  static atomic_t pages_mapped = ATOMIC_INIT(0);  static int use_ptemod; +#define populate_freeable_maps use_ptemod  struct gntdev_priv { +	/* maps with visible offsets in the file descriptor */  	struct list_head maps; -	/* lock protects maps from concurrent changes */ +	/* maps that are not visible; will be freed on munmap. 
+	 * Only populated if populate_freeable_maps == 1 */ +	struct list_head freeable_maps; +	/* lock protects maps and freeable_maps */  	spinlock_t lock;  	struct mm_struct *mm;  	struct mmu_notifier mn; @@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,  	return NULL;  } -static void gntdev_put_map(struct grant_map *map) +static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)  {  	if (!map)  		return; @@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)  		evtchn_put(map->notify.event);  	} +	if (populate_freeable_maps && priv) { +		spin_lock(&priv->lock); +		list_del(&map->next); +		spin_unlock(&priv->lock); +	} +  	if (map->pages && !use_ptemod)  		unmap_grant_pages(map, 0, map->count);  	gntdev_free_map(map); @@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)  	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {  		int pgno = (map->notify.addr >> PAGE_SHIFT); -		if (pgno >= offset && pgno < offset + pages && use_ptemod) { -			void __user *tmp = (void __user *) -				map->vma->vm_start + map->notify.addr; -			err = copy_to_user(tmp, &err, 1); -			if (err) -				return -EFAULT; -			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; -		} else if (pgno >= offset && pgno < offset + pages) { -			uint8_t *tmp = kmap(map->pages[pgno]); +		if (pgno >= offset && pgno < offset + pages) { +			/* No need for kmap, pages are in lowmem */ +			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));  			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; -			kunmap(map->pages[pgno]);  			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;  		}  	} @@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma)  static void gntdev_vma_close(struct vm_area_struct *vma)  {  	struct grant_map *map = vma->vm_private_data; +	struct file *file = vma->vm_file; +	struct gntdev_priv *priv = file->private_data;  	pr_debug("gntdev_vma_close %p\n", vma); -	map->vma = NULL; +	if (use_ptemod) { +		/* It is possible that an mmu notifier could be running +		 * concurrently, so take priv->lock to ensure that the vma won't +		 * vanishing during the unmap_grant_pages call, since we will +		 * spin here until that completes. Such a concurrent call will +		 * not do any unmapping, since that has been done prior to +		 * closing the vma, but it may still iterate the unmap_ops list. 
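A toy user-space sketch of the locking pattern this comment describes: the closer clears the shared pointer only while holding the same lock the concurrent walker takes, so the walker either sees a valid pointer or skips the entry. The pthread mutex, variable names, and harness below are stand-ins and are not the real priv->lock or mmu-notifier machinery.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared;		/* stands in for map->vma */
static int value = 42;

static void *walker(void *arg)	/* plays the mn_invl_range_start role */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (shared)		/* NULL check instead of a stale dereference */
		printf("walker sees %d\n", *shared);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	shared = &value;
	pthread_create(&t, NULL, walker, NULL);

	pthread_mutex_lock(&lock);	/* plays the gntdev_vma_close role */
	shared = NULL;
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}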
+		 */ +		spin_lock(&priv->lock); +		map->vma = NULL; +		spin_unlock(&priv->lock); +	}  	vma->vm_private_data = NULL; -	gntdev_put_map(map); +	gntdev_put_map(priv, map);  }  static struct vm_operations_struct gntdev_vmops = { @@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = {  /* ------------------------------------------------------------------ */ +static void unmap_if_in_range(struct grant_map *map, +			      unsigned long start, unsigned long end) +{ +	unsigned long mstart, mend; +	int err; + +	if (!map->vma) +		return; +	if (map->vma->vm_start >= end) +		return; +	if (map->vma->vm_end <= start) +		return; +	mstart = max(start, map->vma->vm_start); +	mend   = min(end,   map->vma->vm_end); +	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", +			map->index, map->count, +			map->vma->vm_start, map->vma->vm_end, +			start, end, mstart, mend); +	err = unmap_grant_pages(map, +				(mstart - map->vma->vm_start) >> PAGE_SHIFT, +				(mend - mstart) >> PAGE_SHIFT); +	WARN_ON(err); +} +  static void mn_invl_range_start(struct mmu_notifier *mn,  				struct mm_struct *mm,  				unsigned long start, unsigned long end)  {  	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);  	struct grant_map *map; -	unsigned long mstart, mend; -	int err;  	spin_lock(&priv->lock);  	list_for_each_entry(map, &priv->maps, next) { -		if (!map->vma) -			continue; -		if (map->vma->vm_start >= end) -			continue; -		if (map->vma->vm_end <= start) -			continue; -		mstart = max(start, map->vma->vm_start); -		mend   = min(end,   map->vma->vm_end); -		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", -				map->index, map->count, -				map->vma->vm_start, map->vma->vm_end, -				start, end, mstart, mend); -		err = unmap_grant_pages(map, -					(mstart - map->vma->vm_start) >> PAGE_SHIFT, -					(mend - mstart) >> PAGE_SHIFT); -		WARN_ON(err); +		unmap_if_in_range(map, start, end); +	} +	list_for_each_entry(map, &priv->freeable_maps, next) { +		unmap_if_in_range(map, start, end);  	}  	spin_unlock(&priv->lock);  } @@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn,  		err = unmap_grant_pages(map, /* offset */ 0, map->count);  		WARN_ON(err);  	} +	list_for_each_entry(map, &priv->freeable_maps, next) { +		if (!map->vma) +			continue; +		pr_debug("map %d+%d (%lx %lx)\n", +				map->index, map->count, +				map->vma->vm_start, map->vma->vm_end); +		err = unmap_grant_pages(map, /* offset */ 0, map->count); +		WARN_ON(err); +	}  	spin_unlock(&priv->lock);  } @@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)  		return -ENOMEM;  	INIT_LIST_HEAD(&priv->maps); +	INIT_LIST_HEAD(&priv->freeable_maps);  	spin_lock_init(&priv->lock);  	if (use_ptemod) { @@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)  	while (!list_empty(&priv->maps)) {  		map = list_entry(priv->maps.next, struct grant_map, next);  		list_del(&map->next); -		gntdev_put_map(map); +		gntdev_put_map(NULL /* already removed */, map);  	} +	WARN_ON(!list_empty(&priv->freeable_maps));  	if (use_ptemod)  		mmu_notifier_unregister(&priv->mn, priv->mm); @@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,  	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {  		pr_debug("can't map: over limit\n"); -		gntdev_put_map(map); +		gntdev_put_map(NULL, map);  		return err;  	}  	if (copy_from_user(map->grants, &u->refs,  			   sizeof(map->grants[0]) * op.count) != 0) { -		gntdev_put_map(map); -		
return err; +		gntdev_put_map(NULL, map); +		return -EFAULT;  	}  	spin_lock(&priv->lock); @@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,  	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);  	if (map) {  		list_del(&map->next); +		if (populate_freeable_maps) +			list_add_tail(&map->next, &priv->freeable_maps);  		err = 0;  	}  	spin_unlock(&priv->lock);  	if (map) -		gntdev_put_map(map); +		gntdev_put_map(priv, map);  	return err;  } @@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,  	struct ioctl_gntdev_get_offset_for_vaddr op;  	struct vm_area_struct *vma;  	struct grant_map *map; +	int rv = -EINVAL;  	if (copy_from_user(&op, u, sizeof(op)) != 0)  		return -EFAULT;  	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); +	down_read(¤t->mm->mmap_sem);  	vma = find_vma(current->mm, op.vaddr);  	if (!vma || vma->vm_ops != &gntdev_vmops) -		return -EINVAL; +		goto out_unlock;  	map = vma->vm_private_data;  	if (!map) -		return -EINVAL; +		goto out_unlock;  	op.offset = map->index << PAGE_SHIFT;  	op.count = map->count; +	rv = 0; -	if (copy_to_user(u, &op, sizeof(op)) != 0) + out_unlock: +	up_read(¤t->mm->mmap_sem); + +	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)  		return -EFAULT; -	return 0; +	return rv;  }  static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) @@ -778,7 +824,7 @@ out_unlock_put:  out_put_map:  	if (use_ptemod)  		map->vma = NULL; -	gntdev_put_map(map); +	gntdev_put_map(priv, map);  	return err;  } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7038de53652..157c0ccda3e 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -56,10 +56,6 @@  /* External tools reserve first few grant table entries. */  #define NR_RESERVED_ENTRIES 8  #define GNTTAB_LIST_END 0xffffffff -#define GREFS_PER_GRANT_FRAME \ -(grant_table_version == 1 ?                      
\ -(PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \ -(PAGE_SIZE / sizeof(union grant_entry_v2)))  static grant_ref_t **gnttab_list;  static unsigned int nr_grant_frames; @@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;  static grant_status_t *grstatus;  static int grant_table_version; +static int grefs_per_grant_frame;  static struct gnttab_free_callback *gnttab_free_callback_list; @@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)  	unsigned int new_nr_grant_frames, extra_entries, i;  	unsigned int nr_glist_frames, new_nr_glist_frames; +	BUG_ON(grefs_per_grant_frame == 0); +  	new_nr_grant_frames = nr_grant_frames + more_frames; -	extra_entries       = more_frames * GREFS_PER_GRANT_FRAME; +	extra_entries       = more_frames * grefs_per_grant_frame; -	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; +	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;  	new_nr_glist_frames = -		(new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; +		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;  	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {  		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);  		if (!gnttab_list[i]) @@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)  	} -	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; -	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) +	for (i = grefs_per_grant_frame * nr_grant_frames; +	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)  		gnttab_entry(i) = i + 1;  	gnttab_entry(i) = gnttab_free_head; -	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; +	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;  	gnttab_free_count += extra_entries;  	nr_grant_frames = new_nr_grant_frames; @@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);  static unsigned nr_status_frames(unsigned nr_grant_frames)  { -	return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; +	BUG_ON(grefs_per_grant_frame == 0); +	return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;  }  static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) @@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)  	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);  	if (rc == 0 && gsv.version == 2) {  		grant_table_version = 2; +		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);  		gnttab_interface = &gnttab_v2_ops;  	} else if (grant_table_version == 2) {  		/* @@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)  		panic("we need grant tables version 2, but only version 1 is available");  	} else {  		grant_table_version = 1; +		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);  		gnttab_interface = &gnttab_v1_ops;  	}  	printk(KERN_INFO "Grant tables using version %d layout.\n",  		grant_table_version);  } -int gnttab_resume(void) +static int gnttab_setup(void)  {  	unsigned int max_nr_gframes; -	gnttab_request_version();  	max_nr_gframes = gnttab_max_grant_frames();  	if (max_nr_gframes < nr_grant_frames)  		return -ENOSYS; @@ -1160,6 +1161,12 @@ int gnttab_resume(void)  	return 0;  } +int gnttab_resume(void) +{ +	gnttab_request_version(); +	return gnttab_setup(); +} +  int gnttab_suspend(void)  {  	gnttab_interface->unmap_frames(); @@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)  	int rc;  	unsigned int cur, extra; +	BUG_ON(grefs_per_grant_frame == 0);  	cur = 
nr_grant_frames; -	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / -		 GREFS_PER_GRANT_FRAME); +	extra = ((req_entries + (grefs_per_grant_frame-1)) / +		 grefs_per_grant_frame);  	if (cur + extra > gnttab_max_grant_frames())  		return -ENOSPC; @@ -1191,21 +1199,23 @@ int gnttab_init(void)  	unsigned int nr_init_grefs;  	int ret; +	gnttab_request_version();  	nr_grant_frames = 1;  	boot_max_nr_grant_frames = __max_nr_grant_frames();  	/* Determine the maximum number of frames required for the  	 * grant reference free list on the current hypervisor.  	 */ +	BUG_ON(grefs_per_grant_frame == 0);  	max_nr_glist_frames = (boot_max_nr_grant_frames * -			       GREFS_PER_GRANT_FRAME / RPP); +			       grefs_per_grant_frame / RPP);  	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),  			      GFP_KERNEL);  	if (gnttab_list == NULL)  		return -ENOMEM; -	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; +	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;  	for (i = 0; i < nr_glist_frames; i++) {  		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);  		if (gnttab_list[i] == NULL) { @@ -1214,12 +1224,12 @@ int gnttab_init(void)  		}  	} -	if (gnttab_resume() < 0) { +	if (gnttab_setup() < 0) {  		ret = -ENODEV;  		goto ini_nomem;  	} -	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; +	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;  	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)  		gnttab_entry(i) = i + 1; diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index 067fcfa1723..5a27a4599a4 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -278,8 +278,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)  	 * Only those at cpu present map has its sys interface.  	 */  	if (info->flags & XEN_PCPU_FLAGS_INVALID) { -		if (pcpu) -			unregister_and_remove_pcpu(pcpu); +		unregister_and_remove_pcpu(pcpu);  		return 0;  	} diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 0bbbccbb1f1..ca2b00e9d55 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata)  	LIST_HEAD(pagelist);  	struct mmap_mfn_state state; -	if (!xen_initial_domain()) -		return -EPERM; -  	/* We only support privcmd_ioctl_mmap_batch for auto translated. */  	if (xen_feature(XENFEAT_auto_translated_physmap))  		return -ENOSYS; @@ -261,11 +258,12 @@ struct mmap_batch_state {  	 *      -ENOENT if at least 1 -ENOENT has happened.  	 */  	int global_error; -	/* An array for individual errors */ -	int *err; +	int version;  	/* User-space mfn array to store errors in the second pass for V1. */  	xen_pfn_t __user *user_mfn; +	/* User-space int array to store errors in the second pass for V2. */ +	int __user *user_err;  };  /* auto translated dom0 note: if domU being created is PV, then mfn is @@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)  					 &cur_page);  	/* Store error code for second pass. */ -	*(st->err++) = ret; +	if (st->version == 1) { +		if (ret < 0) { +			/* +			 * V1 encodes the error codes in the 32bit top nibble of the +			 * mfn (with its known limitations vis-a-vis 64 bit callers). +			 */ +			*mfnp |= (ret == -ENOENT) ? +						PRIVCMD_MMAPBATCH_PAGED_ERROR : +						PRIVCMD_MMAPBATCH_MFN_ERROR; +		} +	} else { /* st->version == 2 */ +		*((int *) mfnp) = ret; +	}  	/* And see if it affects the global_error. 
*/  	if (ret < 0) { @@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)  	return 0;  } -static int mmap_return_errors_v1(void *data, void *state) +static int mmap_return_errors(void *data, void *state)  { -	xen_pfn_t *mfnp = data;  	struct mmap_batch_state *st = state; -	int err = *(st->err++); -	/* -	 * V1 encodes the error codes in the 32bit top nibble of the -	 * mfn (with its known limitations vis-a-vis 64 bit callers). -	 */ -	*mfnp |= (err == -ENOENT) ? -				PRIVCMD_MMAPBATCH_PAGED_ERROR : -				PRIVCMD_MMAPBATCH_MFN_ERROR; -	return __put_user(*mfnp, st->user_mfn++); +	if (st->version == 1) { +		xen_pfn_t mfnp = *((xen_pfn_t *) data); +		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR) +			return __put_user(mfnp, st->user_mfn++); +		else +			st->user_mfn++; +	} else { /* st->version == 2 */ +		int err = *((int *) data); +		if (err) +			return __put_user(err, st->user_err++); +		else +			st->user_err++; +	} + +	return 0;  }  /* Allocate pfns that are then mapped with gmfns from foreign domid. Update @@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)  	struct vm_area_struct *vma;  	unsigned long nr_pages;  	LIST_HEAD(pagelist); -	int *err_array = NULL;  	struct mmap_batch_state state; -	if (!xen_initial_domain()) -		return -EPERM; -  	switch (version) {  	case 1:  		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) @@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)  		goto out;  	} -	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); -	if (err_array == NULL) { -		ret = -ENOMEM; -		goto out; +	if (version == 2) { +		/* Zero error array now to only copy back actual errors. */ +		if (clear_user(m.err, sizeof(int) * m.num)) { +			ret = -EFAULT; +			goto out; +		}  	}  	down_write(&mm->mmap_sem); @@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)  	state.va            = m.addr;  	state.index         = 0;  	state.global_error  = 0; -	state.err           = err_array; +	state.version       = version;  	/* mmap_batch_fn guarantees ret == 0 */  	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), @@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)  	up_write(&mm->mmap_sem); -	if (version == 1) { -		if (state.global_error) { -			/* Write back errors in second pass. */ -			state.user_mfn = (xen_pfn_t *)m.arr; -			state.err      = err_array; -			ret = traverse_pages(m.num, sizeof(xen_pfn_t), -					     &pagelist, mmap_return_errors_v1, &state); -		} else -			ret = 0; - -	} else if (version == 2) { -		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); -		if (ret) -			ret = -EFAULT; -	} +	if (state.global_error) { +		/* Write back errors in second pass. */ +		state.user_mfn = (xen_pfn_t *)m.arr; +		state.user_err = m.err; +		ret = traverse_pages(m.num, sizeof(xen_pfn_t), +							 &pagelist, mmap_return_errors, &state); +	} else +		ret = 0;  	/* If we have not had any EFAULT-like global errors then set the global  	 * error to -ENOENT if necessary. 
*/ @@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)  		ret = -ENOENT;  out: -	kfree(err_array);  	free_page_list(&pagelist);  	return ret; diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index a7def010eba..f72af87640e 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,  static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,  					     struct pci_dev *dev)  { -	if (xen_pcibk_backend && xen_pcibk_backend->free) +	if (xen_pcibk_backend && xen_pcibk_backend->release)  		return xen_pcibk_backend->release(pdev, dev);  } diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 97f5d264c31..37c1f825f51 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,  			 struct pci_dev *dev, struct xen_pci_op *op)  {  	struct xen_pcibk_dev_data *dev_data; -	int otherend = pdev->xdev->otherend_id;  	int status;  	if (unlikely(verbose_request)) @@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,  	status = pci_enable_msi(dev);  	if (status) { -		printk(KERN_ERR "error enable msi for guest %x status %x\n", -			otherend, status); +		pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", +				    pci_name(dev), pdev->xdev->otherend_id, +				    status);  		op->value = 0;  		return XEN_PCI_ERR_op_failed;  	} @@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,  						pci_name(dev), i,  						op->msix_entries[i].vector);  		} -	} else { -		printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", -			pci_name(dev), result); -	} +	} else +		pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", +				    pci_name(dev), pdev->xdev->otherend_id, +				    result);  	kfree(entries);  	op->value = result; diff --git a/fs/Kconfig b/fs/Kconfig index cfe512fd1ca..780725a463b 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -68,16 +68,6 @@ source "fs/quota/Kconfig"  source "fs/autofs4/Kconfig"  source "fs/fuse/Kconfig" -config CUSE -	tristate "Character device in Userspace support" -	depends on FUSE_FS -	help -	  This FUSE extension allows character devices to be -	  implemented in userspace. - -	  If you want to develop or use userspace character device -	  based on CUSE, answer Y or M. -  config GENERIC_ACL  	bool  	select FS_POSIX_ACL diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 521e9d4424f..5a3327b8f90 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3997,7 +3997,7 @@ again:  	 * We make the other tasks wait for the flush only when we can flush  	 * all things.  	 
*/ -	if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) { +	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {  		flushing = true;  		space_info->flush = 1;  	} @@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)  	unsigned nr_extents = 0;  	int extra_reserve = 0;  	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; -	int ret; +	int ret = 0;  	bool delalloc_lock = true;  	/* If we are a free space inode we need to not flush since we will be in @@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)  	csum_bytes = BTRFS_I(inode)->csum_bytes;  	spin_unlock(&BTRFS_I(inode)->lock); -	if (root->fs_info->quota_enabled) { +	if (root->fs_info->quota_enabled)  		ret = btrfs_qgroup_reserve(root, num_bytes +  					   nr_extents * root->leafsize); -		if (ret) { -			spin_lock(&BTRFS_I(inode)->lock); -			calc_csum_metadata_size(inode, num_bytes, 0); -			spin_unlock(&BTRFS_I(inode)->lock); -			if (delalloc_lock) -				mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); -			return ret; -		} -	} -	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); +	/* +	 * ret != 0 here means the qgroup reservation failed, we go straight to +	 * the shared error handling then. +	 */ +	if (ret == 0) +		ret = reserve_metadata_bytes(root, block_rsv, +					     to_reserve, flush); +  	if (ret) {  		u64 to_free = 0;  		unsigned dropped; @@ -5560,7 +5558,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,  	int empty_cluster = 2 * 1024 * 1024;  	struct btrfs_space_info *space_info;  	int loop = 0; -	int index = 0; +	int index = __get_raid_index(data);  	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?  		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;  	bool found_uncached_bg = false; @@ -6788,11 +6786,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,  						       &wc->flags[level]);  			if (ret < 0) {  				btrfs_tree_unlock_rw(eb, path->locks[level]); +				path->locks[level] = 0;  				return ret;  			}  			BUG_ON(wc->refs[level] == 0);  			if (wc->refs[level] == 1) {  				btrfs_tree_unlock_rw(eb, path->locks[level]); +				path->locks[level] = 0;  				return 1;  			}  		} diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index f169d6b11d7..fdb7a8db3b5 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -171,6 +171,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)  	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))  		return 0; +	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) || +	    test_bit(EXTENT_FLAG_LOGGING, &next->flags)) +		return 0; +  	if (extent_map_end(prev) == next->start &&  	    prev->flags == next->flags &&  	    prev->bdev == next->bdev && @@ -255,7 +259,8 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,  	if (!em)  		goto out; -	list_move(&em->list, &tree->modified_extents); +	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) +		list_move(&em->list, &tree->modified_extents);  	em->generation = gen;  	clear_bit(EXTENT_FLAG_PINNED, &em->flags);  	em->mod_start = em->start; @@ -280,6 +285,13 @@ out:  } +void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) +{ +	clear_bit(EXTENT_FLAG_LOGGING, &em->flags); +	if (em->in_tree) +		try_merge_map(tree, em); +} +  /**   * add_extent_mapping - add new extent map to the extent tree   * @tree:	tree to insert new map in diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 922943ce29e..c6598c89cff 100644 --- 
a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -69,6 +69,7 @@ void free_extent_map(struct extent_map *em);  int __init extent_map_init(void);  void extent_map_exit(void);  int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); +void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);  struct extent_map *search_extent_mapping(struct extent_map_tree *tree,  					 u64 start, u64 len);  #endif diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index bd38cef4235..94aa53b3872 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -460,8 +460,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,  		if (!contig)  			offset = page_offset(bvec->bv_page) + bvec->bv_offset; -		if (!contig && (offset >= ordered->file_offset + ordered->len || -		    offset < ordered->file_offset)) { +		if (offset >= ordered->file_offset + ordered->len || +		    offset < ordered->file_offset) {  			unsigned long bytes_left;  			sums->len = this_sum_bytes;  			this_sum_bytes = 0; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 77061bf43ed..aeb84469d2c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	struct btrfs_key key;  	struct btrfs_ioctl_defrag_range_args range;  	int num_defrag; +	int index; +	int ret;  	/* get the inode */  	key.objectid = defrag->root;  	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);  	key.offset = (u64)-1; + +	index = srcu_read_lock(&fs_info->subvol_srcu); +  	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);  	if (IS_ERR(inode_root)) { -		kmem_cache_free(btrfs_inode_defrag_cachep, defrag); -		return PTR_ERR(inode_root); +		ret = PTR_ERR(inode_root); +		goto cleanup; +	} +	if (btrfs_root_refs(&inode_root->root_item) == 0) { +		ret = -ENOENT; +		goto cleanup;  	}  	key.objectid = defrag->ino; @@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	key.offset = 0;  	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);  	if (IS_ERR(inode)) { -		kmem_cache_free(btrfs_inode_defrag_cachep, defrag); -		return PTR_ERR(inode); +		ret = PTR_ERR(inode); +		goto cleanup;  	} +	srcu_read_unlock(&fs_info->subvol_srcu, index);  	/* do a chunk of defrag */  	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); @@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,  	iput(inode);  	return 0; +cleanup: +	srcu_read_unlock(&fs_info->subvol_srcu, index); +	kmem_cache_free(btrfs_inode_defrag_cachep, defrag); +	return ret;  }  /* @@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,  		if (err < 0 && num_written > 0)  			num_written = err;  	} -out: +  	if (sync)  		atomic_dec(&BTRFS_I(inode)->sync_writers); +out:  	sb_end_write(inode->i_sb);  	current->backing_dev_info = NULL;  	return num_written ? 
num_written : err; @@ -2241,6 +2256,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)  	if (lockend <= lockstart)  		lockend = lockstart + root->sectorsize; +	lockend--;  	len = lockend - lockstart + 1;  	len = max_t(u64, len, root->sectorsize); @@ -2307,9 +2323,12 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)  					}  				} -				*offset = start; -				free_extent_map(em); -				break; +				if (!test_bit(EXTENT_FLAG_PREALLOC, +					      &em->flags)) { +					*offset = start; +					free_extent_map(em); +					break; +				}  			}  		} diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 59ea2e4349c..0be7a8742a4 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1862,11 +1862,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,  {  	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;  	struct btrfs_free_space *info; -	int ret = 0; +	int ret; +	bool re_search = false;  	spin_lock(&ctl->tree_lock);  again: +	ret = 0;  	if (!bytes)  		goto out_lock; @@ -1879,17 +1881,17 @@ again:  		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),  					  1, 0);  		if (!info) { -			/* the tree logging code might be calling us before we -			 * have fully loaded the free space rbtree for this -			 * block group.  So it is possible the entry won't -			 * be in the rbtree yet at all.  The caching code -			 * will make sure not to put it in the rbtree if -			 * the logging code has pinned it. +			/* +			 * If we found a partial bit of our free space in a +			 * bitmap but then couldn't find the other part this may +			 * be a problem, so WARN about it.  			 */ +			WARN_ON(re_search);  			goto out_lock;  		}  	} +	re_search = false;  	if (!info->bitmap) {  		unlink_free_space(ctl, info);  		if (offset == info->offset) { @@ -1935,8 +1937,10 @@ again:  	}  	ret = remove_from_bitmap(ctl, info, &offset, &bytes); -	if (ret == -EAGAIN) +	if (ret == -EAGAIN) { +		re_search = true;  		goto again; +	}  	BUG_ON(ret); /* logic error */  out_lock:  	spin_unlock(&ctl->tree_lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 16d9e8e191e..cc93b23ca35 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -88,7 +88,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {  	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,  }; -static int btrfs_setsize(struct inode *inode, loff_t newsize); +static int btrfs_setsize(struct inode *inode, struct iattr *attr);  static int btrfs_truncate(struct inode *inode);  static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);  static noinline int cow_file_range(struct inode *inode, @@ -2478,6 +2478,18 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)  				continue;  			}  			nr_truncate++; + +			/* 1 for the orphan item deletion. 
*/ +			trans = btrfs_start_transaction(root, 1); +			if (IS_ERR(trans)) { +				ret = PTR_ERR(trans); +				goto out; +			} +			ret = btrfs_orphan_add(trans, inode); +			btrfs_end_transaction(trans, root); +			if (ret) +				goto out; +  			ret = btrfs_truncate(inode);  		} else {  			nr_unlink++; @@ -3665,6 +3677,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)  				block_end - cur_offset, 0);  		if (IS_ERR(em)) {  			err = PTR_ERR(em); +			em = NULL;  			break;  		}  		last_byte = min(extent_map_end(em), block_end); @@ -3748,16 +3761,27 @@ next:  	return err;  } -static int btrfs_setsize(struct inode *inode, loff_t newsize) +static int btrfs_setsize(struct inode *inode, struct iattr *attr)  {  	struct btrfs_root *root = BTRFS_I(inode)->root;  	struct btrfs_trans_handle *trans;  	loff_t oldsize = i_size_read(inode); +	loff_t newsize = attr->ia_size; +	int mask = attr->ia_valid;  	int ret;  	if (newsize == oldsize)  		return 0; +	/* +	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a +	 * special case where we need to update the times despite not having +	 * these flags set.  For all other operations the VFS sets these flags +	 * explicitly if it wants a timestamp update. +	 */ +	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) +		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); +  	if (newsize > oldsize) {  		truncate_pagecache(inode, oldsize, newsize);  		ret = btrfs_cont_expand(inode, oldsize, newsize); @@ -3783,9 +3807,34 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)  			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,  				&BTRFS_I(inode)->runtime_flags); +		/* +		 * 1 for the orphan item we're going to add +		 * 1 for the orphan item deletion. +		 */ +		trans = btrfs_start_transaction(root, 2); +		if (IS_ERR(trans)) +			return PTR_ERR(trans); + +		/* +		 * We need to do this in case we fail at _any_ point during the +		 * actual truncate.  Once we do the truncate_setsize we could +		 * invalidate pages which forces any outstanding ordered io to +		 * be instantly completed which will give us extents that need +		 * to be truncated.  If we fail to get an orphan inode down we +		 * could have left over extents that were never meant to live, +		 * so we need to guarantee from this point on that everything +		 * will be consistent. +		 */ +		ret = btrfs_orphan_add(trans, inode); +		btrfs_end_transaction(trans, root); +		if (ret) +			return ret; +  		/* we don't support swapfiles, so vmtruncate shouldn't fail */  		truncate_setsize(inode, newsize);  		ret = btrfs_truncate(inode); +		if (ret && inode->i_nlink) +			btrfs_orphan_del(NULL, inode);  	}  	return ret; @@ -3805,7 +3854,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)  		return err;  	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { -		err = btrfs_setsize(inode, attr->ia_size); +		err = btrfs_setsize(inode, attr);  		if (err)  			return err;  	} @@ -5572,10 +5621,13 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag  		return em;  	if (em) {  		/* -		 * if our em maps to a hole, there might -		 * actually be delalloc bytes behind it +		 * if our em maps to +		 * -  a hole or +		 * -  a pre-alloc extent, +		 * there might actually be delalloc bytes behind it.  		 
*/ -		if (em->block_start != EXTENT_MAP_HOLE) +		if (em->block_start != EXTENT_MAP_HOLE && +		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))  			return em;  		else  			hole_em = em; @@ -5657,6 +5709,8 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag  			 */  			em->block_start = hole_em->block_start;  			em->block_len = hole_len; +			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) +				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);  		} else {  			em->start = range_start;  			em->len = found; @@ -6915,11 +6969,9 @@ static int btrfs_truncate(struct inode *inode)  	/*  	 * 1 for the truncate slack space -	 * 1 for the orphan item we're going to add -	 * 1 for the orphan item deletion  	 * 1 for updating the inode.  	 */ -	trans = btrfs_start_transaction(root, 4); +	trans = btrfs_start_transaction(root, 2);  	if (IS_ERR(trans)) {  		err = PTR_ERR(trans);  		goto out; @@ -6930,12 +6982,6 @@ static int btrfs_truncate(struct inode *inode)  				      min_size);  	BUG_ON(ret); -	ret = btrfs_orphan_add(trans, inode); -	if (ret) { -		btrfs_end_transaction(trans, root); -		goto out; -	} -  	/*  	 * setattr is responsible for setting the ordered_data_close flag,  	 * but that is only tested during the last file release.  That @@ -7004,12 +7050,6 @@ static int btrfs_truncate(struct inode *inode)  		ret = btrfs_orphan_del(trans, inode);  		if (ret)  			err = ret; -	} else if (ret && inode->i_nlink > 0) { -		/* -		 * Failed to do the truncate, remove us from the in memory -		 * orphan list. -		 */ -		ret = btrfs_orphan_del(NULL, inode);  	}  	if (trans) { @@ -7531,41 +7571,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)   */  int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)  { -	struct list_head *head = &root->fs_info->delalloc_inodes;  	struct btrfs_inode *binode;  	struct inode *inode;  	struct btrfs_delalloc_work *work, *next;  	struct list_head works; +	struct list_head splice;  	int ret = 0;  	if (root->fs_info->sb->s_flags & MS_RDONLY)  		return -EROFS;  	INIT_LIST_HEAD(&works); - +	INIT_LIST_HEAD(&splice); +again:  	spin_lock(&root->fs_info->delalloc_lock); -	while (!list_empty(head)) { -		binode = list_entry(head->next, struct btrfs_inode, +	list_splice_init(&root->fs_info->delalloc_inodes, &splice); +	while (!list_empty(&splice)) { +		binode = list_entry(splice.next, struct btrfs_inode,  				    delalloc_inodes); + +		list_del_init(&binode->delalloc_inodes); +  		inode = igrab(&binode->vfs_inode);  		if (!inode) -			list_del_init(&binode->delalloc_inodes); +			continue; + +		list_add_tail(&binode->delalloc_inodes, +			      &root->fs_info->delalloc_inodes);  		spin_unlock(&root->fs_info->delalloc_lock); -		if (inode) { -			work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); -			if (!work) { -				ret = -ENOMEM; -				goto out; -			} -			list_add_tail(&work->list, &works); -			btrfs_queue_worker(&root->fs_info->flush_workers, -					   &work->work); + +		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); +		if (unlikely(!work)) { +			ret = -ENOMEM; +			goto out;  		} +		list_add_tail(&work->list, &works); +		btrfs_queue_worker(&root->fs_info->flush_workers, +				   &work->work); +  		cond_resched();  		spin_lock(&root->fs_info->delalloc_lock);  	}  	spin_unlock(&root->fs_info->delalloc_lock); +	list_for_each_entry_safe(work, next, &works, list) { +		list_del_init(&work->list); +		btrfs_wait_and_free_delalloc_work(work); +	} + +	spin_lock(&root->fs_info->delalloc_lock); +	if 
(!list_empty(&root->fs_info->delalloc_inodes)) { +		spin_unlock(&root->fs_info->delalloc_lock); +		goto again; +	} +	spin_unlock(&root->fs_info->delalloc_lock); +  	/* the filemap_flush will queue IO into the worker threads, but  	 * we have to make sure the IO is actually started and that  	 * ordered extents get created before we return @@ -7578,11 +7638,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)  		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));  	}  	atomic_dec(&root->fs_info->async_submit_draining); +	return 0;  out:  	list_for_each_entry_safe(work, next, &works, list) {  		list_del_init(&work->list);  		btrfs_wait_and_free_delalloc_work(work);  	} + +	if (!list_empty_careful(&splice)) { +		spin_lock(&root->fs_info->delalloc_lock); +		list_splice_tail(&splice, &root->fs_info->delalloc_inodes); +		spin_unlock(&root->fs_info->delalloc_lock); +	}  	return ret;  } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4b4516770f0..338f2597bf7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root,  	BUG_ON(ret); -	d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));  fail:  	if (async_transid) {  		*async_transid = trans->transid; @@ -525,6 +524,10 @@ fail:  	}  	if (err && !ret)  		ret = err; + +	if (!ret) +		d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); +  	return ret;  } @@ -1339,7 +1342,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,  	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,  			1)) {  		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); -		return -EINPROGRESS; +		mnt_drop_write_file(file); +		return -EINVAL;  	}  	mutex_lock(&root->fs_info->volume_mutex); @@ -1362,6 +1366,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,  		printk(KERN_INFO "btrfs: resizing devid %llu\n",  		       (unsigned long long)devid);  	} +  	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);  	if (!device) {  		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", @@ -1369,9 +1374,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,  		ret = -EINVAL;  		goto out_free;  	} -	if (device->fs_devices && device->fs_devices->seeding) { + +	if (!device->writeable) {  		printk(KERN_INFO "btrfs: resizer unable to apply on " -		       "seeding device %llu\n", +		       "readonly device %llu\n",  		       (unsigned long long)devid);  		ret = -EINVAL;  		goto out_free; @@ -1443,8 +1449,8 @@ out_free:  	kfree(vol_args);  out:  	mutex_unlock(&root->fs_info->volume_mutex); -	mnt_drop_write_file(file);  	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); +	mnt_drop_write_file(file);  	return ret;  } @@ -2095,13 +2101,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,  		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);  		if (err)  			goto out_dput; - -		/* check if subvolume may be deleted by a non-root user */ -		err = btrfs_may_delete(dir, dentry, 1); -		if (err) -			goto out_dput;  	} +	/* check if subvolume may be deleted by a user */ +	err = btrfs_may_delete(dir, dentry, 1); +	if (err) +		goto out_dput; +  	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {  		err = -EINVAL;  		goto out_dput; @@ -2183,19 +2189,20 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)  	struct btrfs_ioctl_defrag_range_args *range;  	int ret; -	if (btrfs_root_readonly(root)) -		return -EROFS; +	ret = 
mnt_want_write_file(file); +	if (ret) +		return ret;  	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,  			1)) {  		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); -		return -EINPROGRESS; +		mnt_drop_write_file(file); +		return -EINVAL;  	} -	ret = mnt_want_write_file(file); -	if (ret) { -		atomic_set(&root->fs_info->mutually_exclusive_operation_running, -			   0); -		return ret; + +	if (btrfs_root_readonly(root)) { +		ret = -EROFS; +		goto out;  	}  	switch (inode->i_mode & S_IFMT) { @@ -2247,8 +2254,8 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)  		ret = -EINVAL;  	}  out: -	mnt_drop_write_file(file);  	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); +	mnt_drop_write_file(file);  	return ret;  } @@ -2263,7 +2270,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)  	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,  			1)) {  		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); -		return -EINPROGRESS; +		return -EINVAL;  	}  	mutex_lock(&root->fs_info->volume_mutex); @@ -2300,7 +2307,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)  			1)) {  		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");  		mnt_drop_write_file(file); -		return -EINPROGRESS; +		return -EINVAL;  	}  	mutex_lock(&root->fs_info->volume_mutex); @@ -2316,8 +2323,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)  	kfree(vol_args);  out:  	mutex_unlock(&root->fs_info->volume_mutex); -	mnt_drop_write_file(file);  	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); +	mnt_drop_write_file(file);  	return ret;  } @@ -3437,8 +3444,8 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)  	struct btrfs_fs_info *fs_info = root->fs_info;  	struct btrfs_ioctl_balance_args *bargs;  	struct btrfs_balance_control *bctl; +	bool need_unlock; /* for mut. excl. ops lock */  	int ret; -	int need_to_clear_lock = 0;  	if (!capable(CAP_SYS_ADMIN))  		return -EPERM; @@ -3447,14 +3454,61 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)  	if (ret)  		return ret; -	mutex_lock(&fs_info->volume_mutex); +again: +	if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) { +		mutex_lock(&fs_info->volume_mutex); +		mutex_lock(&fs_info->balance_mutex); +		need_unlock = true; +		goto locked; +	} + +	/* +	 * mut. excl. ops lock is locked.  
Three possibilities: +	 *   (1) some other op is running +	 *   (2) balance is running +	 *   (3) balance is paused -- special case (think resume) +	 */  	mutex_lock(&fs_info->balance_mutex); +	if (fs_info->balance_ctl) { +		/* this is either (2) or (3) */ +		if (!atomic_read(&fs_info->balance_running)) { +			mutex_unlock(&fs_info->balance_mutex); +			if (!mutex_trylock(&fs_info->volume_mutex)) +				goto again; +			mutex_lock(&fs_info->balance_mutex); + +			if (fs_info->balance_ctl && +			    !atomic_read(&fs_info->balance_running)) { +				/* this is (3) */ +				need_unlock = false; +				goto locked; +			} + +			mutex_unlock(&fs_info->balance_mutex); +			mutex_unlock(&fs_info->volume_mutex); +			goto again; +		} else { +			/* this is (2) */ +			mutex_unlock(&fs_info->balance_mutex); +			ret = -EINPROGRESS; +			goto out; +		} +	} else { +		/* this is (1) */ +		mutex_unlock(&fs_info->balance_mutex); +		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); +		ret = -EINVAL; +		goto out; +	} + +locked: +	BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));  	if (arg) {  		bargs = memdup_user(arg, sizeof(*bargs));  		if (IS_ERR(bargs)) {  			ret = PTR_ERR(bargs); -			goto out; +			goto out_unlock;  		}  		if (bargs->flags & BTRFS_BALANCE_RESUME) { @@ -3474,13 +3528,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)  		bargs = NULL;  	} -	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, -			1)) { -		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); +	if (fs_info->balance_ctl) {  		ret = -EINPROGRESS;  		goto out_bargs;  	} -	need_to_clear_lock = 1;  	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);  	if (!bctl) { @@ -3501,11 +3552,17 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)  	}  do_balance: -	ret = btrfs_balance(bctl, bargs);  	/* -	 * bctl is freed in __cancel_balance or in free_fs_info if -	 * restriper was paused all the way until unmount +	 * Ownership of bctl and mutually_exclusive_operation_running +	 * goes to btrfs_balance.  bctl is freed in __cancel_balance, +	 * or, if restriper was paused all the way until unmount, in +	 * free_fs_info.  mutually_exclusive_operation_running is +	 * cleared in __cancel_balance.  	 
*/ +	need_unlock = false; + +	ret = btrfs_balance(bctl, bargs); +  	if (arg) {  		if (copy_to_user(arg, bargs, sizeof(*bargs)))  			ret = -EFAULT; @@ -3513,12 +3570,12 @@ do_balance:  out_bargs:  	kfree(bargs); -out: -	if (need_to_clear_lock) -		atomic_set(&root->fs_info->mutually_exclusive_operation_running, -			   0); +out_unlock:  	mutex_unlock(&fs_info->balance_mutex);  	mutex_unlock(&fs_info->volume_mutex); +	if (need_unlock) +		atomic_set(&fs_info->mutually_exclusive_operation_running, 0); +out:  	mnt_drop_write_file(file);  	return ret;  } @@ -3698,6 +3755,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)  		goto drop_write;  	} +	if (!sa->qgroupid) { +		ret = -EINVAL; +		goto out; +	} +  	trans = btrfs_join_transaction(root);  	if (IS_ERR(trans)) {  		ret = PTR_ERR(trans); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index f1073129704..e5ed5672960 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,  	 * if the disk i_size is already at the inode->i_size, or  	 * this ordered extent is inside the disk i_size, we're done  	 */ -	if (disk_i_size == i_size || offset <= disk_i_size) { +	if (disk_i_size == i_size) +		goto out; + +	/* +	 * We still need to update disk_i_size if outstanding_isize is greater +	 * than disk_i_size. +	 */ +	if (offset <= disk_i_size && +	    (!ordered || ordered->outstanding_isize <= disk_i_size))  		goto out; -	}  	/*  	 * walk backward from this ordered extent to disk_i_size. @@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,  			break;  		if (test->file_offset >= i_size)  			break; -		if (test->file_offset >= disk_i_size) { +		if (entry_end(test) > disk_i_size) {  			/*  			 * we don't update disk_i_size now, so record this  			 * undealt i_size. 
Or we will not know the real diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index fe9d02c45f8..a5c85623432 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -379,6 +379,13 @@ next1:  		ret = add_relation_rb(fs_info, found_key.objectid,  				      found_key.offset); +		if (ret == -ENOENT) { +			printk(KERN_WARNING +				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n", +				(unsigned long long)found_key.objectid, +				(unsigned long long)found_key.offset); +			ret = 0;	/* ignore the error */ +		}  		if (ret)  			goto out;  next2: @@ -956,17 +963,28 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,  			struct btrfs_fs_info *fs_info, u64 qgroupid)  {  	struct btrfs_root *quota_root; +	struct btrfs_qgroup *qgroup;  	int ret = 0;  	quota_root = fs_info->quota_root;  	if (!quota_root)  		return -EINVAL; +	/* check if there are no relations to this qgroup */ +	spin_lock(&fs_info->qgroup_lock); +	qgroup = find_qgroup_rb(fs_info, qgroupid); +	if (qgroup) { +		if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) { +			spin_unlock(&fs_info->qgroup_lock); +			return -EBUSY; +		} +	} +	spin_unlock(&fs_info->qgroup_lock); +  	ret = del_qgroup_item(trans, quota_root, qgroupid);  	spin_lock(&fs_info->qgroup_lock);  	del_qgroup_rb(quota_root->fs_info, qgroupid); -  	spin_unlock(&fs_info->qgroup_lock);  	return ret; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bdbb94f245c..67783e03d12 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)  	int corrected = 0;  	struct btrfs_key key;  	struct inode *inode = NULL; +	struct btrfs_fs_info *fs_info;  	u64 end = offset + PAGE_SIZE - 1;  	struct btrfs_root *local_root; +	int srcu_index;  	key.objectid = root;  	key.type = BTRFS_ROOT_ITEM_KEY;  	key.offset = (u64)-1; -	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); -	if (IS_ERR(local_root)) + +	fs_info = fixup->root->fs_info; +	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); + +	local_root = btrfs_read_fs_root_no_name(fs_info, &key); +	if (IS_ERR(local_root)) { +		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  		return PTR_ERR(local_root); +	}  	key.type = BTRFS_INODE_ITEM_KEY;  	key.objectid = inum;  	key.offset = 0; -	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); +	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); +	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  	if (IS_ERR(inode))  		return PTR_ERR(inode); @@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)  	}  	if (PageUptodate(page)) { -		struct btrfs_fs_info *fs_info;  		if (PageDirty(page)) {  			/*  			 * we need to write the data to the defect sector. 
the @@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)  	u64 physical_for_dev_replace;  	u64 len;  	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; +	int srcu_index;  	key.objectid = root;  	key.type = BTRFS_ROOT_ITEM_KEY;  	key.offset = (u64)-1; + +	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); +  	local_root = btrfs_read_fs_root_no_name(fs_info, &key); -	if (IS_ERR(local_root)) +	if (IS_ERR(local_root)) { +		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  		return PTR_ERR(local_root); +	}  	key.type = BTRFS_INODE_ITEM_KEY;  	key.objectid = inum;  	key.offset = 0;  	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); +	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);  	if (IS_ERR(inode))  		return PTR_ERR(inode); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 54454542ad4..321b7fb4e44 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1814,8 +1814,10 @@ static int name_cache_insert(struct send_ctx *sctx,  			(unsigned long)nce->ino);  	if (!nce_head) {  		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); -		if (!nce_head) +		if (!nce_head) { +			kfree(nce);  			return -ENOMEM; +		}  		INIT_LIST_HEAD(nce_head);  		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 99545df1b86..d8982e9601d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,  			     function, line, errstr);  		return;  	} -	trans->transaction->aborted = errno; +	ACCESS_ONCE(trans->transaction->aborted) = errno;  	__btrfs_std_error(root->fs_info, function, line, errno, NULL);  }  /* diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 87fac9a21ea..fc03aa60b68 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,  					  &root->fs_info->trans_block_rsv,  					  num_bytes, flush);  		if (ret) -			return ERR_PTR(ret); +			goto reserve_fail;  	}  again:  	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); -	if (!h) -		return ERR_PTR(-ENOMEM); +	if (!h) { +		ret = -ENOMEM; +		goto alloc_fail; +	}  	/*  	 * If we are JOIN_NOLOCK we're already committing a transaction and @@ -365,11 +367,7 @@ again:  	if (ret < 0) {  		/* We must get the transaction if we are JOIN_NOLOCK. 
*/  		BUG_ON(type == TRANS_JOIN_NOLOCK); - -		if (type < TRANS_JOIN_NOLOCK) -			sb_end_intwrite(root->fs_info->sb); -		kmem_cache_free(btrfs_trans_handle_cachep, h); -		return ERR_PTR(ret); +		goto join_fail;  	}  	cur_trans = root->fs_info->running_transaction; @@ -410,6 +408,19 @@ got_it:  	if (!current->journal_info && type != TRANS_USERSPACE)  		current->journal_info = h;  	return h; + +join_fail: +	if (type < TRANS_JOIN_NOLOCK) +		sb_end_intwrite(root->fs_info->sb); +	kmem_cache_free(btrfs_trans_handle_cachep, h); +alloc_fail: +	if (num_bytes) +		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, +					num_bytes); +reserve_fail: +	if (qgroup_reserved) +		btrfs_qgroup_free(root, qgroup_reserved); +	return ERR_PTR(ret);  }  struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, @@ -1468,7 +1479,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  		goto cleanup_transaction;  	} -	if (cur_trans->aborted) { +	/* Stop the commit early if ->aborted is set */ +	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {  		ret = cur_trans->aborted;  		goto cleanup_transaction;  	} @@ -1574,6 +1586,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  	wait_event(cur_trans->writer_wait,  		   atomic_read(&cur_trans->num_writers) == 1); +	/* ->aborted might be set after the previous check, so check it */ +	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { +		ret = cur_trans->aborted; +		goto cleanup_transaction; +	}  	/*  	 * the reloc mutex makes sure that we stop  	 * the balancing code from coming in and moving @@ -1657,6 +1674,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,  		goto cleanup_transaction;  	} +	/* +	 * The tasks which save the space cache and inode cache may also +	 * update ->aborted, check it. +	 */ +	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { +		ret = cur_trans->aborted; +		mutex_unlock(&root->fs_info->tree_log_mutex); +		mutex_unlock(&root->fs_info->reloc_mutex); +		goto cleanup_transaction; +	} +  	btrfs_prepare_extent_commit(trans, root);  	cur_trans = root->fs_info->running_transaction; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 83186c7e45d..9027bb1e746 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3357,6 +3357,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,  	if (skip_csum)  		return 0; +	if (em->compress_type) { +		csum_offset = 0; +		csum_len = block_len; +	} +  	/* block start is already adjusted for the file extent offset. */  	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,  				       em->block_start + csum_offset, @@ -3410,13 +3415,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,  		em = list_entry(extents.next, struct extent_map, list);  		list_del_init(&em->list); -		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);  		/*  		 * If we had an error we just need to delete everybody from our  		 * private list.  		 
*/  		if (ret) { +			clear_em_logging(tree, em);  			free_extent_map(em);  			continue;  		} @@ -3424,8 +3429,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,  		write_unlock(&tree->lock);  		ret = log_one_extent(trans, inode, root, em, path); -		free_extent_map(em);  		write_lock(&tree->lock); +		clear_em_logging(tree, em); +		free_extent_map(em);  	}  	WARN_ON(!list_empty(&extents));  	write_unlock(&tree->lock); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5cce6aa7401..5cbb7f4b167 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1431,7 +1431,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)  		}  	} else {  		ret = btrfs_get_bdev_and_sb(device_path, -					    FMODE_READ | FMODE_EXCL, +					    FMODE_WRITE | FMODE_EXCL,  					    root->fs_info->bdev_holder, 0,  					    &bdev, &bh);  		if (ret) @@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)  	ret = 0;  	/* Notify udev that device has changed */ -	btrfs_kobject_uevent(bdev, KOBJ_CHANGE); +	if (bdev) +		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);  error_brelse:  	brelse(bh); @@ -2614,7 +2615,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,  	cache = btrfs_lookup_block_group(fs_info, chunk_offset);  	chunk_used = btrfs_block_group_used(&cache->item); -	user_thresh = div_factor_fine(cache->key.offset, bargs->usage); +	if (bargs->usage == 0) +		user_thresh = 0; +	else if (bargs->usage > 100) +		user_thresh = cache->key.offset; +	else +		user_thresh = div_factor_fine(cache->key.offset, +					      bargs->usage); +  	if (chunk_used < user_thresh)  		ret = 0; @@ -2959,6 +2967,8 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)  	unset_balance_control(fs_info);  	ret = del_balance_item(fs_info->tree_root);  	BUG_ON(ret); + +	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);  }  void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, @@ -3138,8 +3148,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,  out:  	if (bctl->flags & BTRFS_BALANCE_RESUME)  		__cancel_balance(fs_info); -	else +	else {  		kfree(bctl); +		atomic_set(&fs_info->mutually_exclusive_operation_running, 0); +	}  	return ret;  } @@ -3156,7 +3168,6 @@ static int balance_kthread(void *data)  		ret = btrfs_balance(fs_info->balance_ctl, NULL);  	} -	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);  	mutex_unlock(&fs_info->balance_mutex);  	mutex_unlock(&fs_info->volume_mutex); @@ -3179,7 +3190,6 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)  		return 0;  	} -	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));  	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");  	if (IS_ERR(tsk))  		return PTR_ERR(tsk); @@ -3233,6 +3243,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)  	btrfs_balance_sys(leaf, item, &disk_bargs);  	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); +	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); +  	mutex_lock(&fs_info->volume_mutex);  	mutex_lock(&fs_info->balance_mutex); @@ -3496,7 +3508,7 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {  	{ 1, 1, 2, 2, 2, 2 /* raid1 */ },  	{ 1, 2, 1, 1, 1, 2 /* dup */ },  	{ 1, 1, 0, 2, 1, 1 /* raid0 */ }, -	{ 1, 1, 0, 1, 1, 1 /* single */ }, +	{ 1, 1, 1, 1, 1, 1 /* single */ },  };  static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 
ce5cbd717bf..210fce2df30 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -226,6 +226,8 @@ compose_mount_options_out:  compose_mount_options_err:  	kfree(mountdata);  	mountdata = ERR_PTR(rc); +	kfree(*devname); +	*devname = NULL;  	goto compose_mount_options_out;  } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 17c3643e595..12b3da39733 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1917,7 +1917,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)  	}  	case AF_INET6: {  		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; -		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs; +		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;  		return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);  	}  	default: diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 7ff49852b0c..911649a47dd 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf,  #endif  		return -EINVAL; -#ifdef CONFIG_COMPAT -	if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) -#else +	/* +	 * can't compare against COMPAT/dlm_write_request32 because +	 * we don't yet know if is64bit is zero +	 */  	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) -#endif  		return -EINVAL;  	kbuf = kzalloc(count + 1, GFP_NOFS); diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index e95b94945d5..137af4255da 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)  		retval = f2fs_getxattr(inode, name_index, "", value, retval);  	} -	if (retval < 0) { -		if (retval == -ENODATA) -			acl = NULL; -		else -			acl = ERR_PTR(retval); -	} else { +	if (retval > 0)  		acl = f2fs_acl_from_disk(value, retval); -	} +	else if (retval == -ENODATA) +		acl = NULL; +	else +		acl = ERR_PTR(retval);  	kfree(value); +  	if (!IS_ERR(acl))  		set_cached_acl(inode, type, acl); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 6ef36c37e2b..ff3c8439af8 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -214,7 +214,6 @@ retry:  		goto retry;  	}  	new->ino = ino; -	INIT_LIST_HEAD(&new->list);  	/* add new_oentry into list which is sorted by inode number */  	if (orphan) { @@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi)  	sbi->n_orphans = 0;  } -int create_checkpoint_caches(void) +int __init create_checkpoint_caches(void)  {  	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",  			sizeof(struct orphan_inode_entry), NULL); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 3aa5ce7cab8..7bd22a20112 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -547,6 +547,15 @@ redirty_out:  #define MAX_DESIRED_PAGES_WP	4096 +static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, +			void *data) +{ +	struct address_space *mapping = data; +	int ret = mapping->a_ops->writepage(page, wbc); +	mapping_set_error(mapping, ret); +	return ret; +} +  static int f2fs_write_data_pages(struct address_space *mapping,  			    struct writeback_control *wbc)  { @@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,  	if (!S_ISDIR(inode->i_mode))  		mutex_lock(&sbi->writepages); -	ret = generic_writepages(mapping, wbc); +	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);  	if (!S_ISDIR(inode->i_mode))  		mutex_unlock(&sbi->writepages);  	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); @@ -689,6 +698,11 
@@ static int f2fs_set_data_page_dirty(struct page *page)  	return 0;  } +static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) +{ +	return generic_block_bmap(mapping, block, get_data_block_ro); +} +  const struct address_space_operations f2fs_dblock_aops = {  	.readpage	= f2fs_read_data_page,  	.readpages	= f2fs_read_data_pages, @@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = {  	.invalidatepage	= f2fs_invalidate_data_page,  	.releasepage	= f2fs_release_data_page,  	.direct_IO	= f2fs_direct_IO, +	.bmap		= f2fs_bmap,  }; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 0e0380a588a..c8c37307b32 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -26,6 +26,7 @@  static LIST_HEAD(f2fs_stat_list);  static struct dentry *debugfs_root; +static DEFINE_MUTEX(f2fs_stat_mutex);  static void update_general_status(struct f2fs_sb_info *sbi)  { @@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v)  	int i = 0;  	int j; +	mutex_lock(&f2fs_stat_mutex);  	list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { -		mutex_lock(&si->stat_lock); -		if (!si->sbi) { -			mutex_unlock(&si->stat_lock); -			continue; -		}  		update_general_status(si->sbi);  		seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++); -		seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ", -			   si->nat_area_segs, si->sit_area_segs); +		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", +			   si->sit_area_segs, si->nat_area_segs);  		seq_printf(s, "[SSA: %d] [MAIN: %d",  			   si->ssa_area_segs, si->main_area_segs);  		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", @@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v)  		seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",  				(si->base_mem + si->cache_mem) >> 10,  				si->base_mem >> 10, si->cache_mem >> 10); -		mutex_unlock(&si->stat_lock);  	} +	mutex_unlock(&f2fs_stat_mutex);  	return 0;  } @@ -303,7 +300,7 @@ static const struct file_operations stat_fops = {  	.release = single_release,  }; -static int init_stats(struct f2fs_sb_info *sbi) +int f2fs_build_stats(struct f2fs_sb_info *sbi)  {  	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);  	struct f2fs_stat_info *si; @@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi)  		return -ENOMEM;  	si = sbi->stat_info; -	mutex_init(&si->stat_lock); -	list_add_tail(&si->stat_list, &f2fs_stat_list); -  	si->all_area_segs = le32_to_cpu(raw_super->segment_count);  	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);  	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); @@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi)  	si->main_area_zones = si->main_area_sections /  				le32_to_cpu(raw_super->secs_per_zone);  	si->sbi = sbi; -	return 0; -} -int f2fs_build_stats(struct f2fs_sb_info *sbi) -{ -	int retval; - -	retval = init_stats(sbi); -	if (retval) -		return retval; - -	if (!debugfs_root) -		debugfs_root = debugfs_create_dir("f2fs", NULL); +	mutex_lock(&f2fs_stat_mutex); +	list_add_tail(&si->stat_list, &f2fs_stat_list); +	mutex_unlock(&f2fs_stat_mutex); -	debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);  	return 0;  } @@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)  {  	struct f2fs_stat_info *si = sbi->stat_info; +	mutex_lock(&f2fs_stat_mutex);  	list_del(&si->stat_list); -	mutex_lock(&si->stat_lock); -	si->sbi = NULL; -	mutex_unlock(&si->stat_lock); +	mutex_unlock(&f2fs_stat_mutex); +  	kfree(sbi->stat_info);  } -void 
destroy_root_stats(void) +void __init f2fs_create_root_stats(void) +{ +	debugfs_root = debugfs_create_dir("f2fs", NULL); +	if (debugfs_root) +		debugfs_create_file("status", S_IRUGO, debugfs_root, +					 NULL, &stat_fops); +} + +void f2fs_destroy_root_stats(void)  {  	debugfs_remove_recursive(debugfs_root);  	debugfs_root = NULL; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 951ed52748f..989980e16d0 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,  	}  	if (inode) { -		inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; +		inode->i_ctime = CURRENT_TIME;  		drop_nlink(inode);  		if (S_ISDIR(inode->i_mode)) {  			drop_nlink(inode); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 13c6dfbb718..c8e2d751ef9 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -211,11 +211,11 @@ struct dnode_of_data {  static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,  		struct page *ipage, struct page *npage, nid_t nid)  { +	memset(dn, 0, sizeof(*dn));  	dn->inode = inode;  	dn->inode_page = ipage;  	dn->node_page = npage;  	dn->nid = nid; -	dn->inode_page_locked = 0;  }  /* @@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *);   * super.c   */  int f2fs_sync_fs(struct super_block *, int); +extern __printf(3, 4) +void f2fs_msg(struct super_block *, const char *, const char *, ...);  /*   * hash.c @@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int,  void flush_nat_entries(struct f2fs_sb_info *);  int build_node_manager(struct f2fs_sb_info *);  void destroy_node_manager(struct f2fs_sb_info *); -int create_node_manager_caches(void); +int __init create_node_manager_caches(void);  void destroy_node_manager_caches(void);  /* @@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *);  void block_operations(struct f2fs_sb_info *);  void write_checkpoint(struct f2fs_sb_info *, bool, bool);  void init_orphan_info(struct f2fs_sb_info *); -int create_checkpoint_caches(void); +int __init create_checkpoint_caches(void);  void destroy_checkpoint_caches(void);  /* @@ -984,9 +986,9 @@ int do_write_data_page(struct page *);  int start_gc_thread(struct f2fs_sb_info *);  void stop_gc_thread(struct f2fs_sb_info *);  block_t start_bidx_of_node(unsigned int); -int f2fs_gc(struct f2fs_sb_info *, int); +int f2fs_gc(struct f2fs_sb_info *);  void build_gc_manager(struct f2fs_sb_info *); -int create_gc_caches(void); +int __init create_gc_caches(void);  void destroy_gc_caches(void);  /* @@ -1058,7 +1060,8 @@ struct f2fs_stat_info {  int f2fs_build_stats(struct f2fs_sb_info *);  void f2fs_destroy_stats(struct f2fs_sb_info *); -void destroy_root_stats(void); +void __init f2fs_create_root_stats(void); +void f2fs_destroy_root_stats(void);  #else  #define stat_inc_call_count(si)  #define stat_inc_seg_count(si, type) @@ -1068,7 +1071,8 @@ void destroy_root_stats(void);  static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }  static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } -static inline void destroy_root_stats(void) { } +static inline void __init f2fs_create_root_stats(void) { } +static inline void f2fs_destroy_root_stats(void) { }  #endif  extern const struct file_operations f2fs_dir_operations; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 7f9ea9271eb..3191b52aafb 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -96,8 +96,9 @@ out:  }  static const struct vm_operations_struct f2fs_file_vm_ops = { -	.fault        = 
filemap_fault, -	.page_mkwrite = f2fs_vm_page_mkwrite, +	.fault		= filemap_fault, +	.page_mkwrite	= f2fs_vm_page_mkwrite, +	.remap_pages	= generic_file_remap_pages,  };  static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) @@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)  	if (ret)  		return ret; +	/* guarantee free sections for fsync */ +	f2fs_balance_fs(sbi); +  	mutex_lock(&inode->i_mutex);  	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) @@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)  		struct dnode_of_data dn;  		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); +		f2fs_balance_fs(sbi); +  		mutex_lock_op(sbi, DATA_TRUNC);  		set_new_dnode(&dn, inode, NULL, NULL, 0);  		err = get_dnode_of_data(&dn, index, RDONLY_NODE); @@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode,  				loff_t offset, loff_t len)  {  	struct inode *inode = file->f_path.dentry->d_inode; -	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);  	long ret;  	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) @@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode,  	else  		ret = expand_inode_data(inode, offset, len, mode); -	f2fs_balance_fs(sbi); +	if (!ret) { +		inode->i_mtime = inode->i_ctime = CURRENT_TIME; +		mark_inode_dirty(inode); +	}  	return ret;  } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index b0ec721e984..c386910dacc 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -78,7 +78,7 @@ static int gc_thread_func(void *data)  		sbi->bg_gc++; -		if (f2fs_gc(sbi, 1) == GC_NONE) +		if (f2fs_gc(sbi) == GC_NONE)  			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;  		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)  			wait_ms = GC_THREAD_MAX_SLEEP_TIME; @@ -424,7 +424,11 @@ next_step:  }  /* - * Calculate start block index that this node page contains + * Calculate start block index indicating the given node offset. + * Be careful, caller should give this node offset only indicating direct node + * blocks. If any node offsets, which point the other types of node blocks such + * as indirect or double indirect node blocks, are given, it must be a caller's + * bug.   */  block_t start_bidx_of_node(unsigned int node_ofs)  { @@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,  	return ret;  } -int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) +int f2fs_gc(struct f2fs_sb_info *sbi)  { -	unsigned int segno; -	int old_free_secs, cur_free_secs; -	int gc_status, nfree;  	struct list_head ilist; +	unsigned int segno, i;  	int gc_type = BG_GC; +	int gc_status = GC_NONE;  	INIT_LIST_HEAD(&ilist);  gc_more: -	nfree = 0; -	gc_status = GC_NONE; +	if (!(sbi->sb->s_flags & MS_ACTIVE)) +		goto stop;  	if (has_not_enough_free_secs(sbi)) -		old_free_secs = reserved_sections(sbi); -	else -		old_free_secs = free_sections(sbi); - -	while (sbi->sb->s_flags & MS_ACTIVE) { -		int i; -		if (has_not_enough_free_secs(sbi)) -			gc_type = FG_GC; +		gc_type = FG_GC; -		cur_free_secs = free_sections(sbi) + nfree; +	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) +		goto stop; -		/* We got free space successfully. */ -		if (nGC < cur_free_secs - old_free_secs) -			break; - -		if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) +	for (i = 0; i < sbi->segs_per_sec; i++) { +		/* +		 * do_garbage_collect will give us three gc_status: +		 * GC_ERROR, GC_DONE, and GC_BLOCKED. 
+		 * If GC is finished uncleanly, we have to return
+		 * the victim to dirty segment list.
+		 */
+		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+		if (gc_status != GC_DONE)
 			break;
-
-		for (i = 0; i < sbi->segs_per_sec; i++) {
-			/*
-			 * do_garbage_collect will give us three gc_status:
-			 * GC_ERROR, GC_DONE, and GC_BLOCKED.
-			 * If GC is finished uncleanly, we have to return
-			 * the victim to dirty segment list.
-			 */
-			gc_status = do_garbage_collect(sbi, segno + i,
-					&ilist, gc_type);
-			if (gc_status != GC_DONE)
-				goto stop;
-			nfree++;
-		}
 	}
-stop:
-	if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
+	if (has_not_enough_free_secs(sbi)) {
 		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-		if (nfree)
+		if (has_not_enough_free_secs(sbi))
 			goto gc_more;
 	}
+stop:
 	mutex_unlock(&sbi->gc_mutex);
 	put_gc_inode(&ilist);
-	BUG_ON(!list_empty(&ilist));
 	return gc_status;
 }
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
 	DIRTY_I(sbi)->v_ops = &default_v_ops;
 }
-int create_gc_caches(void)
+int __init create_gc_caches(void)
 {
 	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
 			sizeof(struct inode_entry), NULL);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index bf20b4d0321..79424177732 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 			inode->i_ino == F2FS_META_INO(sbi))
 		return 0;
+	if (wbc)
+		f2fs_balance_fs(sbi);
+
 	node_page = get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page))
 		return PTR_ERR(node_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 5066bfd256c..9bda63c9c16 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
 	return 0;
 }
+/*
+ * It is very important to gather dirty pages and write at once, so that we can
+ * submit a big bio without interfering with other data writes.
+ * By default, 512 pages (2MB), a segment size, is quite reasonable.
+ */ +#define COLLECT_DIRTY_NODES	512  static int f2fs_write_node_pages(struct address_space *mapping,  			    struct writeback_control *wbc)  { @@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,  	struct block_device *bdev = sbi->sb->s_bdev;  	long nr_to_write = wbc->nr_to_write; -	if (wbc->for_kupdate) -		return 0; - -	if (get_pages(sbi, F2FS_DIRTY_NODES) == 0) -		return 0; - +	/* First check balancing cached NAT entries */  	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {  		write_checkpoint(sbi, false, false);  		return 0;  	} +	/* collect a number of dirty node pages and write together */ +	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) +		return 0; +  	/* if mounting is failed, skip writing node pages */  	wbc->nr_to_write = bio_get_nr_vecs(bdev);  	sync_node_pages(sbi, 0, wbc); @@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)  	kfree(nm_i);  } -int create_node_manager_caches(void) +int __init create_node_manager_caches(void)  {  	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",  			sizeof(struct nat_entry), NULL); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index b571fee677d..f42e4060b39 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)  		kunmap(page);  		f2fs_put_page(page, 0);  	} else { -		f2fs_add_link(&dent, inode); +		err = f2fs_add_link(&dent, inode);  	}  	iput(dir);  out: @@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)  				goto out;  			} -			INIT_LIST_HEAD(&entry->list);  			list_add_tail(&entry->list, head);  			entry->blkaddr = blkaddr;  		} @@ -174,10 +173,9 @@ out:  static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,  					struct list_head *head)  { -	struct list_head *this; -	struct fsync_inode_entry *entry; -	list_for_each(this, head) { -		entry = list_entry(this, struct fsync_inode_entry, list); +	struct fsync_inode_entry *entry, *tmp; + +	list_for_each_entry_safe(entry, tmp, head, list) {  		iput(entry->inode);  		list_del(&entry->list);  		kmem_cache_free(fsync_entry_slab, entry); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index de6240922b0..4b009906658 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)  	 */  	if (has_not_enough_free_secs(sbi)) {  		mutex_lock(&sbi->gc_mutex); -		f2fs_gc(sbi, 1); +		f2fs_gc(sbi);  	}  } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 08a94c814bd..37fad04c866 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = {  	{Opt_err, NULL},  }; +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 
+{ +	struct va_format vaf; +	va_list args; + +	va_start(args, fmt); +	vaf.fmt = fmt; +	vaf.va = &args; +	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); +	va_end(args); +} +  static void init_once(void *foo)  {  	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)  	if (sync)  		write_checkpoint(sbi, false, false); +	else +		f2fs_balance_fs(sbi);  	return 0;  } @@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = {  	.get_parent = f2fs_get_parent,  }; -static int parse_options(struct f2fs_sb_info *sbi, char *options) +static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, +				char *options)  {  	substring_t args[MAX_OPT_ARGS];  	char *p; @@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)  			break;  #else  		case Opt_nouser_xattr: -			pr_info("nouser_xattr options not supported\n"); +			f2fs_msg(sb, KERN_INFO, +				"nouser_xattr options not supported");  			break;  #endif  #ifdef CONFIG_F2FS_FS_POSIX_ACL @@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)  			break;  #else  		case Opt_noacl: -			pr_info("noacl options not supported\n"); +			f2fs_msg(sb, KERN_INFO, "noacl options not supported");  			break;  #endif  		case Opt_active_logs: @@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)  			set_opt(sbi, DISABLE_EXT_IDENTIFY);  			break;  		default: -			pr_err("Unrecognized mount option \"%s\" or missing value\n", -					p); +			f2fs_msg(sb, KERN_ERR, +				"Unrecognized mount option \"%s\" or missing value", +				p);  			return -EINVAL;  		}  	} @@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits)  	return result;  } -static int sanity_check_raw_super(struct f2fs_super_block *raw_super) +static int sanity_check_raw_super(struct super_block *sb, +			struct f2fs_super_block *raw_super)  {  	unsigned int blocksize; -	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) +	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { +		f2fs_msg(sb, KERN_INFO, +			"Magic Mismatch, valid(0x%x) - read(0x%x)", +			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));  		return 1; +	}  	/* Currently, support only 4KB block size */  	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); -	if (blocksize != PAGE_CACHE_SIZE) +	if (blocksize != PAGE_CACHE_SIZE) { +		f2fs_msg(sb, KERN_INFO, +			"Invalid blocksize (%u), supports only 4KB\n", +			blocksize);  		return 1; +	}  	if (le32_to_cpu(raw_super->log_sectorsize) != -					F2FS_LOG_SECTOR_SIZE) +					F2FS_LOG_SECTOR_SIZE) { +		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");  		return 1; +	}  	if (le32_to_cpu(raw_super->log_sectors_per_block) != -					F2FS_LOG_SECTORS_PER_BLOCK) +					F2FS_LOG_SECTORS_PER_BLOCK) { +		f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");  		return 1; +	}  	return 0;  } @@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)  	if (!sbi)  		return -ENOMEM; -	/* set a temporary block size */ -	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) +	/* set a block size */ +	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) { +		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");  		goto free_sbi; +	}  	/* read f2fs raw super block */  	raw_super_buf = sb_bread(sb, 0);  	if (!raw_super_buf) {  		err = -EIO; +		f2fs_msg(sb, KERN_ERR, "unable to read superblock");  		goto free_sbi;  	}  	raw_super = (struct f2fs_super_block *) @@ -438,12 +471,14 @@ static int 
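For reference, a minimal sketch of how the new f2fs_msg() helper above is meant to be called, mirroring the call sites added elsewhere in this patch; report_bad_blocksize() is an illustrative name only:

	static void report_bad_blocksize(struct super_block *sb, unsigned int blocksize)
	{
		/* f2fs_msg() supplies the "F2FS-fs (<dev>):" prefix and the newline itself */
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB", blocksize);
	}

The __printf(3, 4) annotation on the declaration added to f2fs.h lets the compiler type-check the format arguments at every such call site.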
f2fs_fill_super(struct super_block *sb, void *data, int silent)  	set_opt(sbi, POSIX_ACL);  #endif  	/* parse mount options */ -	if (parse_options(sbi, (char *)data)) +	if (parse_options(sb, sbi, (char *)data))  		goto free_sb_buf;  	/* sanity checking of raw super */ -	if (sanity_check_raw_super(raw_super)) +	if (sanity_check_raw_super(sb, raw_super)) { +		f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");  		goto free_sb_buf; +	}  	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));  	sb->s_max_links = F2FS_LINK_MAX; @@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)  	/* get an inode for meta space */  	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));  	if (IS_ERR(sbi->meta_inode)) { +		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");  		err = PTR_ERR(sbi->meta_inode);  		goto free_sb_buf;  	}  	err = get_valid_checkpoint(sbi); -	if (err) +	if (err) { +		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");  		goto free_meta_inode; +	}  	/* sanity checking of checkpoint */  	err = -EINVAL; -	if (sanity_check_ckpt(raw_super, sbi->ckpt)) +	if (sanity_check_ckpt(raw_super, sbi->ckpt)) { +		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");  		goto free_cp; +	}  	sbi->total_valid_node_count =  				le32_to_cpu(sbi->ckpt->valid_node_count); @@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)  	INIT_LIST_HEAD(&sbi->dir_inode_list);  	spin_lock_init(&sbi->dir_inode_lock); -	/* init super block */ -	if (!sb_set_blocksize(sb, sbi->blocksize)) -		goto free_cp; -  	init_orphan_info(sbi);  	/* setup f2fs internal modules */  	err = build_segment_manager(sbi); -	if (err) +	if (err) { +		f2fs_msg(sb, KERN_ERR, +			"Failed to initialize F2FS segment manager");  		goto free_sm; +	}  	err = build_node_manager(sbi); -	if (err) +	if (err) { +		f2fs_msg(sb, KERN_ERR, +			"Failed to initialize F2FS node manager");  		goto free_nm; +	}  	build_gc_manager(sbi);  	/* get an inode for node space */  	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));  	if (IS_ERR(sbi->node_inode)) { +		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");  		err = PTR_ERR(sbi->node_inode);  		goto free_nm;  	} @@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)  	/* read root inode and dentry */  	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));  	if (IS_ERR(root)) { +		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");  		err = PTR_ERR(root);  		goto free_node_inode;  	} @@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = {  	.fs_flags	= FS_REQUIRES_DEV,  }; -static int init_inodecache(void) +static int __init init_inodecache(void)  {  	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",  			sizeof(struct f2fs_inode_info), NULL); @@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void)  	err = create_checkpoint_caches();  	if (err)  		goto fail; -	return register_filesystem(&f2fs_fs_type); +	err = register_filesystem(&f2fs_fs_type); +	if (err) +		goto fail; +	f2fs_create_root_stats();  fail:  	return err;  }  static void __exit exit_f2fs_fs(void)  { -	destroy_root_stats(); +	f2fs_destroy_root_stats();  	unregister_filesystem(&f2fs_fs_type);  	destroy_checkpoint_caches();  	destroy_gc_caches(); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 940136a3d3a..8038c049650 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,  	if 
(name_len > 255 || value_len > MAX_VALUE_LEN)  		return -ERANGE; +	f2fs_balance_fs(sbi); +  	mutex_lock_op(sbi, NODE_NEW);  	if (!fi->i_xattr_nid) {  		/* Allocate new attribute block */ diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 0cf160a94ed..1b2f6c2c3aa 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -4,12 +4,24 @@ config FUSE_FS  	  With FUSE it is possible to implement a fully functional filesystem  	  in a userspace program. -	  There's also companion library: libfuse.  This library along with -	  utilities is available from the FUSE homepage: +	  There's also a companion library: libfuse2.  This library is available +	  from the FUSE homepage:  	  <http://fuse.sourceforge.net/> +	  although chances are your distribution already has that library +	  installed if you've installed the "fuse" package itself.  	  See <file:Documentation/filesystems/fuse.txt> for more information.  	  See <file:Documentation/Changes> for needed library/utility version.  	  If you want to develop a userspace FS, or if you want to use  	  a filesystem based on FUSE, answer Y or M. + +config CUSE +	tristate "Character device in Userspace support" +	depends on FUSE_FS +	help +	  This FUSE extension allows character devices to be +	  implemented in userspace. + +	  If you want to develop or use a userspace character device +	  based on CUSE, answer Y or M. diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index ee8d5504229..e397b675b02 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -45,7 +45,6 @@  #include <linux/miscdevice.h>  #include <linux/mutex.h>  #include <linux/slab.h> -#include <linux/spinlock.h>  #include <linux/stat.h>  #include <linux/module.h> @@ -63,7 +62,7 @@ struct cuse_conn {  	bool			unrestricted_ioctl;  }; -static DEFINE_SPINLOCK(cuse_lock);		/* protects cuse_conntbl */ +static DEFINE_MUTEX(cuse_lock);		/* protects registration */  static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];  static struct class *cuse_class; @@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file)  	int rc;  	/* look up and get the connection */ -	spin_lock(&cuse_lock); +	mutex_lock(&cuse_lock);  	list_for_each_entry(pos, cuse_conntbl_head(devt), list)  		if (pos->dev->devt == devt) {  			fuse_conn_get(&pos->fc);  			cc = pos;  			break;  		} -	spin_unlock(&cuse_lock); +	mutex_unlock(&cuse_lock);  	/* dead? 
*/  	if (!cc) @@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)  static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)  {  	char *end = p + len; -	char *key, *val; +	char *uninitialized_var(key), *uninitialized_var(val);  	int rc;  	while (true) { @@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev)   */  static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)  { -	struct cuse_conn *cc = fc_to_cc(fc); +	struct cuse_conn *cc = fc_to_cc(fc), *pos;  	struct cuse_init_out *arg = req->out.args[0].value;  	struct page *page = req->pages[0];  	struct cuse_devinfo devinfo = { };  	struct device *dev;  	struct cdev *cdev;  	dev_t devt; -	int rc; +	int rc, i;  	if (req->out.h.error ||  	    arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { @@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)  	dev_set_drvdata(dev, cc);  	dev_set_name(dev, "%s", devinfo.name); +	mutex_lock(&cuse_lock); + +	/* make sure the device-name is unique */ +	for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { +		list_for_each_entry(pos, &cuse_conntbl[i], list) +			if (!strcmp(dev_name(pos->dev), dev_name(dev))) +				goto err_unlock; +	} +  	rc = device_add(dev);  	if (rc) -		goto err_device; +		goto err_unlock;  	/* register cdev */  	rc = -ENOMEM;  	cdev = cdev_alloc();  	if (!cdev) -		goto err_device; +		goto err_unlock;  	cdev->owner = THIS_MODULE;  	cdev->ops = &cuse_frontend_fops; @@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)  	cc->cdev = cdev;  	/* make the device available */ -	spin_lock(&cuse_lock);  	list_add(&cc->list, cuse_conntbl_head(devt)); -	spin_unlock(&cuse_lock); +	mutex_unlock(&cuse_lock);  	/* announce device availability */  	dev_set_uevent_suppress(dev, 0); @@ -391,7 +398,8 @@ out:  err_cdev:  	cdev_del(cdev); -err_device: +err_unlock: +	mutex_unlock(&cuse_lock);  	put_device(dev);  err_region:  	unregister_chrdev_region(devt, 1); @@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file)  	int rc;  	/* remove from the conntbl, no more access from this point on */ -	spin_lock(&cuse_lock); +	mutex_lock(&cuse_lock);  	list_del_init(&cc->list); -	spin_unlock(&cuse_lock); +	mutex_unlock(&cuse_lock);  	/* remove device */  	if (cc->dev) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index c16335315e5..e83351aa5ba 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)  	struct page *oldpage = *pagep;  	struct page *newpage;  	struct pipe_buffer *buf = cs->pipebufs; -	struct address_space *mapping; -	pgoff_t index;  	unlock_request(cs->fc, cs->req);  	fuse_copy_finish(cs); @@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)  	if (fuse_check_page(newpage) != 0)  		goto out_fallback_unlock; -	mapping = oldpage->mapping; -	index = oldpage->index; -  	/*  	 * This is a new and locked page, it shouldn't be mapped or  	 * have any special flags on it diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e21d4d8f87e..f3ab824fa30 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,  	return ret;  } -long fuse_file_fallocate(struct file *file, int mode, loff_t offset, -			    loff_t length) +static long fuse_file_fallocate(struct file *file, int mode, loff_t 
offset, +				loff_t length)  {  	struct fuse_file *ff = file->private_data;  	struct fuse_conn *fc = ff->fc; @@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset,  	return err;  } -EXPORT_SYMBOL_GPL(fuse_file_fallocate);  static const struct file_operations fuse_file_operations = {  	.llseek		= fuse_file_llseek, diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index b906ed17a83..9802de0f85e 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -281,6 +281,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)  {  	struct gfs2_sbd *sdp = gl->gl_sbd;  	struct lm_lockstruct *ls = &sdp->sd_lockstruct; +	int lvb_needs_unlock = 0;  	int error;  	if (gl->gl_lksb.sb_lkid == 0) { @@ -294,8 +295,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)  	gfs2_update_request_times(gl);  	/* don't want to skip dlm_unlock writing the lvb when lock is ex */ + +	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) +		lvb_needs_unlock = 1; +  	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && -	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) { +	    !lvb_needs_unlock) {  		gfs2_glock_free(gl);  		return;  	} diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index dd057bc6b65..fc8dc20fdeb 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -177,11 +177,31 @@ out_nofree:  	return mnt;  } +static int +nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ +	if (NFS_FH(dentry->d_inode)->size != 0) +		return nfs_getattr(mnt, dentry, stat); +	generic_fillattr(dentry->d_inode, stat); +	return 0; +} + +static int +nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) +{ +	if (NFS_FH(dentry->d_inode)->size != 0) +		return nfs_setattr(dentry, attr); +	return -EACCES; +} +  const struct inode_operations nfs_mountpoint_inode_operations = {  	.getattr	= nfs_getattr, +	.setattr	= nfs_setattr,  };  const struct inode_operations nfs_referral_inode_operations = { +	.getattr	= nfs_namespace_getattr, +	.setattr	= nfs_namespace_setattr,  };  static void nfs_expire_automounts(struct work_struct *work) diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index acc34726812..2e9779b58b7 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -236,11 +236,10 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,  	error = nfs4_discover_server_trunking(clp, &old);  	if (error < 0)  		goto error; +	nfs_put_client(clp);  	if (clp != old) {  		clp->cl_preserve_clid = true; -		nfs_put_client(clp);  		clp = old; -		atomic_inc(&clp->cl_count);  	}  	return clp; @@ -306,7 +305,7 @@ int nfs40_walk_client_list(struct nfs_client *new,  		.clientid	= new->cl_clientid,  		.confirm	= new->cl_confirm,  	}; -	int status; +	int status = -NFS4ERR_STALE_CLIENTID;  	spin_lock(&nn->nfs_client_lock);  	list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { @@ -332,40 +331,33 @@ int nfs40_walk_client_list(struct nfs_client *new,  		if (prev)  			nfs_put_client(prev); +		prev = pos;  		status = nfs4_proc_setclientid_confirm(pos, &clid, cred); -		if (status == 0) { +		switch (status) { +		case -NFS4ERR_STALE_CLIENTID: +			break; +		case 0:  			nfs4_swap_callback_idents(pos, new); -			nfs_put_client(pos); +			prev = NULL;  			*result = pos;  			dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",  				__func__, pos, atomic_read(&pos->cl_count)); -			return 0; -		} -		if (status != -NFS4ERR_STALE_CLIENTID) { -			nfs_put_client(pos); -			dprintk("NFS: <-- %s status = %d, no result\n", -				
__func__, status); -			return status; +		default: +			goto out;  		}  		spin_lock(&nn->nfs_client_lock); -		prev = pos;  	} +	spin_unlock(&nn->nfs_client_lock); -	/* -	 * No matching nfs_client found.  This should be impossible, -	 * because the new nfs_client has already been added to -	 * nfs_client_list by nfs_get_client(). -	 * -	 * Don't BUG(), since the caller is holding a mutex. -	 */ +	/* No match found. The server lost our clientid */ +out:  	if (prev)  		nfs_put_client(prev); -	spin_unlock(&nn->nfs_client_lock); -	pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); -	return -NFS4ERR_STALE_CLIENTID; +	dprintk("NFS: <-- %s status = %d\n", __func__, status); +	return status;  }  #ifdef CONFIG_NFS_V4_1 @@ -432,7 +424,7 @@ int nfs41_walk_client_list(struct nfs_client *new,  {  	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);  	struct nfs_client *pos, *n, *prev = NULL; -	int error; +	int status = -NFS4ERR_STALE_CLIENTID;  	spin_lock(&nn->nfs_client_lock);  	list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { @@ -448,14 +440,17 @@ int nfs41_walk_client_list(struct nfs_client *new,  				nfs_put_client(prev);  			prev = pos; -			error = nfs_wait_client_init_complete(pos); -			if (error < 0) { +			nfs4_schedule_lease_recovery(pos); +			status = nfs_wait_client_init_complete(pos); +			if (status < 0) {  				nfs_put_client(pos);  				spin_lock(&nn->nfs_client_lock);  				continue;  			} - +			status = pos->cl_cons_state;  			spin_lock(&nn->nfs_client_lock); +			if (status < 0) +				continue;  		}  		if (pos->rpc_ops != new->rpc_ops) @@ -473,6 +468,7 @@ int nfs41_walk_client_list(struct nfs_client *new,  		if (!nfs4_match_serverowners(pos, new))  			continue; +		atomic_inc(&pos->cl_count);  		spin_unlock(&nn->nfs_client_lock);  		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",  			__func__, pos, atomic_read(&pos->cl_count)); @@ -481,16 +477,10 @@ int nfs41_walk_client_list(struct nfs_client *new,  		return 0;  	} -	/* -	 * No matching nfs_client found.  This should be impossible, -	 * because the new nfs_client has already been added to -	 * nfs_client_list by nfs_get_client(). -	 * -	 * Don't BUG(), since the caller is holding a mutex. -	 */ +	/* No matching nfs_client found. */  	spin_unlock(&nn->nfs_client_lock); -	pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); -	return -NFS4ERR_STALE_CLIENTID; +	dprintk("NFS: <-- %s status = %d\n", __func__, status); +	return status;  }  #endif	/* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9448c579d41..e61f68d5ef2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -136,16 +136,11 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,  	clp->cl_confirm = clid.confirm;  	status = nfs40_walk_client_list(clp, result, cred); -	switch (status) { -	case -NFS4ERR_STALE_CLIENTID: -		set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); -	case 0: +	if (status == 0) {  		/* Sustain the lease, even if it's empty.  If the clientid4  		 * goes stale it's of no use for trunking discovery. 
*/  		nfs4_schedule_state_renewal(*result); -		break;  	} -  out:  	return status;  } @@ -1863,6 +1858,7 @@ again:  	case -ETIMEDOUT:  	case -EAGAIN:  		ssleep(1); +	case -NFS4ERR_STALE_CLIENTID:  		dprintk("NFS: %s after status %d, retrying\n",  			__func__, status);  		goto again; @@ -2022,8 +2018,18 @@ static int nfs4_reset_session(struct nfs_client *clp)  	nfs4_begin_drain_session(clp);  	cred = nfs4_get_exchange_id_cred(clp);  	status = nfs4_proc_destroy_session(clp->cl_session, cred); -	if (status && status != -NFS4ERR_BADSESSION && -	    status != -NFS4ERR_DEADSESSION) { +	switch (status) { +	case 0: +	case -NFS4ERR_BADSESSION: +	case -NFS4ERR_DEADSESSION: +		break; +	case -NFS4ERR_BACK_CHAN_BUSY: +	case -NFS4ERR_DELAY: +		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); +		status = 0; +		ssleep(1); +		goto out; +	default:  		status = nfs4_recovery_handle_error(clp, status);  		goto out;  	} diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 2e7e8c878e5..b056b162872 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2589,27 +2589,23 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,  	struct nfs_server *server;  	struct dentry *mntroot = ERR_PTR(-ENOMEM);  	struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod; -	int error; -	dprintk("--> nfs_xdev_mount_common()\n"); +	dprintk("--> nfs_xdev_mount()\n");  	mount_info.mntfh = mount_info.cloned->fh;  	/* create a new volume representation */  	server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); -	if (IS_ERR(server)) { -		error = PTR_ERR(server); -		goto out_err; -	} -	mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod); -	dprintk("<-- nfs_xdev_mount_common() = 0\n"); -out: -	return mntroot; +	if (IS_ERR(server)) +		mntroot = ERR_CAST(server); +	else +		mntroot = nfs_fs_mount_common(server, flags, +				dev_name, &mount_info, nfs_mod); -out_err: -	dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error); -	goto out; +	dprintk("<-- nfs_xdev_mount() = %ld\n", +			IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L); +	return mntroot;  }  #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fdb18076948..f3859354e41 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,  	if (ret < 0)  		printk(KERN_ERR "NILFS: GC failed during preparation: "  			"cannot read source blocks: err=%d\n", ret); -	else +	else { +		if (nilfs_sb_need_update(nilfs)) +			set_nilfs_discontinued(nilfs);  		ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); +	}  	nilfs_remove_all_gcinodes(nilfs);  	clear_nilfs_gc_running(nilfs); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 4111a40ebe1..5f707e53717 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -86,11 +86,11 @@ xfs_destroy_ioend(  	}  	if (ioend->io_iocb) { +		inode_dio_done(ioend->io_inode);  		if (ioend->io_isasync) {  			aio_complete(ioend->io_iocb, ioend->io_error ?  					
ioend->io_error : ioend->io_result, 0);  		} -		inode_dio_done(ioend->io_inode);  	}  	mempool_free(ioend, xfs_ioend_pool); diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 0e92d12765d..cdb2d334858 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4680,9 +4680,6 @@ __xfs_bmapi_allocate(  			return error;  	} -	if (bma->flags & XFS_BMAPI_STACK_SWITCH) -		bma->stack_switch = 1; -  	error = xfs_bmap_alloc(bma);  	if (error)  		return error; @@ -4956,6 +4953,9 @@ xfs_bmapi_write(  	bma.flist = flist;  	bma.firstblock = firstblock; +	if (flags & XFS_BMAPI_STACK_SWITCH) +		bma.stack_switch = 1; +  	while (bno < end && n < *nmap) {  		inhole = eof || bma.got.br_startoff > bno;  		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 56d1614760c..fbbb9eb92e3 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -487,6 +487,7 @@ _xfs_buf_find(  	struct rb_node		*parent;  	xfs_buf_t		*bp;  	xfs_daddr_t		blkno = map[0].bm_bn; +	xfs_daddr_t		eofs;  	int			numblks = 0;  	int			i; @@ -498,6 +499,23 @@ _xfs_buf_find(  	ASSERT(!(numbytes < (1 << btp->bt_sshift)));  	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); +	/* +	 * Corrupted block numbers can get through to here, unfortunately, so we +	 * have to check that the buffer falls within the filesystem bounds. +	 */ +	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); +	if (blkno >= eofs) { +		/* +		 * XXX (dgc): we should really be returning EFSCORRUPTED here, +		 * but none of the higher level infrastructure supports +		 * returning a specific error on buffer lookup failures. +		 */ +		xfs_alert(btp->bt_mount, +			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", +			  __func__, blkno, eofs); +		return NULL; +	} +  	/* get tree root */  	pag = xfs_perag_get(btp->bt_mount,  				xfs_daddr_to_agno(btp->bt_mount, blkno)); @@ -1487,6 +1505,8 @@ restart:  	while (!list_empty(&btp->bt_lru)) {  		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);  		if (atomic_read(&bp->b_hold) > 1) { +			trace_xfs_buf_wait_buftarg(bp, _RET_IP_); +			list_move_tail(&bp->b_lru, &btp->bt_lru);  			spin_unlock(&btp->bt_lru_lock);  			delay(100);  			goto restart; diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 77b09750e92..3f9949fee39 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -652,7 +652,10 @@ xfs_buf_item_unlock(  	/*  	 * If the buf item isn't tracking any data, free it, otherwise drop the -	 * reference we hold to it. +	 * reference we hold to it. If we are aborting the transaction, this may +	 * be the only reference to the buf item, so we free it anyway +	 * regardless of whether it is dirty or not. A dirty abort implies a +	 * shutdown, anyway.  	 
*/  	clean = 1;  	for (i = 0; i < bip->bli_format_count; i++) { @@ -664,7 +667,12 @@ xfs_buf_item_unlock(  	}  	if (clean)  		xfs_buf_item_relse(bp); -	else +	else if (aborted) { +		if (atomic_dec_and_test(&bip->bli_refcount)) { +			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); +			xfs_buf_item_relse(bp); +		} +	} else  		atomic_dec(&bip->bli_refcount);  	if (!hold) diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index d0e9c74d3d9..a8bd26b82ec 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -246,10 +246,10 @@ xfs_swap_extents(  		goto out_unlock;  	} -	error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); +	error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);  	if (error)  		goto out_unlock; -	truncate_pagecache_range(VFS_I(ip), 0, -1); +	truncate_pagecache_range(VFS_I(tip), 0, -1);  	/* Verify O_DIRECT for ftmp */  	if (VN_CACHED(VFS_I(tip)) != 0) { diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index add06b4e9a6..364818eef40 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -351,6 +351,15 @@ xfs_iomap_prealloc_size(  		}  		if (shift)  			alloc_blocks >>= shift; + +		/* +		 * If we are still trying to allocate more space than is +		 * available, squash the prealloc hard. This can happen if we +		 * have a large file on a small filesystem and the above +		 * lowspace thresholds are smaller than MAXEXTLEN. +		 */ +		while (alloc_blocks >= freesp) +			alloc_blocks >>= 4;  	}  	if (alloc_blocks < mp->m_writeio_blocks) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index da508463ff1..7d6df7c00c3 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -658,7 +658,7 @@ xfs_sb_quiet_read_verify(  		return;  	}  	/* quietly fail */ -	xfs_buf_ioerror(bp, EFSCORRUPTED); +	xfs_buf_ioerror(bp, EWRONGFS);  }  static void diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 2e137d4a85a..16a812977ea 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -341,6 +341,7 @@ DEFINE_BUF_EVENT(xfs_buf_item_relse);  DEFINE_BUF_EVENT(xfs_buf_item_iodone);  DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);  DEFINE_BUF_EVENT(xfs_buf_error_relse); +DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);  DEFINE_BUF_EVENT(xfs_trans_read_buf_io);  DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index ccf7b4f34a3..6c32af918c2 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h @@ -16,6 +16,22 @@ extern void  dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,  		    dma_addr_t dma_handle); +static inline void *dma_alloc_attrs(struct device *dev, size_t size, +				    dma_addr_t *dma_handle, gfp_t flag, +				    struct dma_attrs *attrs) +{ +	/* attrs is not supported and ignored */ +	return dma_alloc_coherent(dev, size, dma_handle, flag); +} + +static inline void dma_free_attrs(struct device *dev, size_t size, +				  void *cpu_addr, dma_addr_t dma_handle, +				  struct dma_attrs *attrs) +{ +	/* attrs is not supported and ignored */ +	dma_free_coherent(dev, size, cpu_addr, dma_handle); +} +  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)  #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 701beab27aa..5cf680a98f9 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)  	return offset_from_zero_pfn <= (zero_page_mask 
>> PAGE_SHIFT);  } -static inline unsigned long my_zero_pfn(unsigned long addr) -{ -	return page_to_pfn(ZERO_PAGE(addr)); -} +#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr)) +  #else  static inline int is_zero_pfn(unsigned long pfn)  { diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 58f466ff00d..1db51b8524e 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,  			unsigned long fd, off_t pgoff);  #endif +#ifndef CONFIG_GENERIC_SIGALTSTACK  #ifndef sys_sigaltstack  asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,  			struct pt_regs *);  #endif +#endif  #ifndef sys_rt_sigreturn  asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); diff --git a/include/linux/ata.h b/include/linux/ata.h index 408da950217..8f7a3d68371 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -297,10 +297,12 @@ enum {  	ATA_LOG_SATA_NCQ	= 0x10,  	ATA_LOG_SATA_ID_DEV_DATA  = 0x30,  	ATA_LOG_SATA_SETTINGS	  = 0x08, -	ATA_LOG_DEVSLP_MDAT	  = 0x30, +	ATA_LOG_DEVSLP_OFFSET	  = 0x30, +	ATA_LOG_DEVSLP_SIZE	  = 0x08, +	ATA_LOG_DEVSLP_MDAT	  = 0x00,  	ATA_LOG_DEVSLP_MDAT_MASK  = 0x1F, -	ATA_LOG_DEVSLP_DETO	  = 0x31, -	ATA_LOG_DEVSLP_VALID	  = 0x37, +	ATA_LOG_DEVSLP_DETO	  = 0x01, +	ATA_LOG_DEVSLP_VALID	  = 0x07,  	ATA_LOG_DEVSLP_VALID_MASK = 0x80,  	/* READ/WRITE LONG (obsolete) */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 8b84916dc67..7a9498ab3c2 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -618,18 +618,30 @@ extern int __init efi_setup_pcdp_console(char *);  #endif  /* - * We play games with efi_enabled so that the compiler will, if possible, remove - * EFI-related code altogether. + * We play games with efi_enabled so that the compiler will, if + * possible, remove EFI-related code altogether.   */ +#define EFI_BOOT		0	/* Were we booted from EFI? */ +#define EFI_SYSTEM_TABLES	1	/* Can we use EFI system tables? */ +#define EFI_CONFIG_TABLES	2	/* Can we use EFI config tables? */ +#define EFI_RUNTIME_SERVICES	3	/* Can we use runtime services? */ +#define EFI_MEMMAP		4	/* Can we use EFI memory map? */ +#define EFI_64BIT		5	/* Is the firmware 64-bit? 
*/
+
 #ifdef CONFIG_EFI
 # ifdef CONFIG_X86
-   extern int efi_enabled;
-   extern bool efi_64bit;
+extern int efi_enabled(int facility);
 # else
-#  define efi_enabled 1
+static inline int efi_enabled(int facility)
+{
+	return 1;
+}
 # endif
 #else
-# define efi_enabled 0
+static inline int efi_enabled(int facility)
+{
+	return 0;
+}
 #endif

 /*
diff --git a/include/linux/libata.h b/include/linux/libata.h index 83ba0ab2c91..649e5f86b5f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -652,8 +652,8 @@ struct ata_device {
 		u32		gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
 	};
-	/* Identify Device Data Log (30h), SATA Settings (page 08h) */
-	u8			sata_settings[ATA_SECT_SIZE];
+	/* DEVSLP Timing Variables from Identify Device Data Log */
+	u8			devslp_timing[ATA_LOG_DEVSLP_SIZE];
 	/* error history */
 	int			spdn_cnt;
diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e8..d0ab98f73d3 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list)
 	     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))

 /**
+ * llist_for_each_entry_safe - iterate safely against remove over some entries
+ * of lock-less list of given type.
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		another type * to use as a temporary storage.
+ * @node:	the first entry of deleted list entries.
+ * @member:	the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head. This variant allows removal of entries
+ * as we iterate.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member)		\
+	for ((pos) = llist_entry((node), typeof(*(pos)), member),	\
+	     (n) = (pos)->member.next;					\
+	     &(pos)->member != NULL;					\
+	     (pos) = llist_entry(n, typeof(*(pos)), member),		\
+	     (n) = (&(pos)->member != NULL) ? 
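A minimal sketch of how callers are expected to use the efi_enabled() facility test introduced in include/linux/efi.h above, together with the new EFI_* facility bits; check_efi_runtime() is an illustrative name, not part of the patch:

	#include <linux/efi.h>
	#include <linux/errno.h>

	static int check_efi_runtime(void)
	{
		/* Were we booted from EFI at all? */
		if (!efi_enabled(EFI_BOOT))
			return -ENODEV;

		/* Only call runtime services when the firmware actually provides them. */
		if (!efi_enabled(EFI_RUNTIME_SERVICES))
			return -EOPNOTSUPP;

		return 0;
	}

On non-x86 EFI configurations the inline stub above reports every facility as available, so such checks compile down to nothing.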
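The ata.h and libata.h hunks above turn the old absolute log addresses into offsets within the cached devslp_timing[] buffer. A rough sketch of how the constants combine, assuming the Identify Device Data log has already been read into dev->devslp_timing; show_devslp_timing() is an illustrative name:

	static void show_devslp_timing(struct ata_device *dev)
	{
		u8 valid, mdat, deto;

		valid = dev->devslp_timing[ATA_LOG_DEVSLP_VALID];
		if (!(valid & ATA_LOG_DEVSLP_VALID_MASK))
			return;		/* device did not report DEVSLP timing */

		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
		       ATA_LOG_DEVSLP_MDAT_MASK;
		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];

		ata_dev_info(dev, "DEVSLP timing: MDAT=%u DETO=%u\n", mdat, deto);
	}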
(pos)->member.next : NULL) + +/**   * llist_empty - tests whether a lock-less list is empty   * @head:	the list to test   * diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0108a56f814..28bd5fa2ff2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size;   * the slab_mutex must be held when looping through those caches   */  #define for_each_memcg_cache_index(_idx)	\ -	for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) +	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)  static inline bool memcg_kmem_enabled(void)  { diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 2138bd33021..e53dcfeaee6 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -272,8 +272,6 @@ struct abx500_bm_data {  	const struct abx500_fg_parameters *fg_params;  }; -extern struct abx500_bm_data ab8500_bm_data; -  enum {  	NTC_EXTERNAL = 0,  	NTC_INTERNAL, diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h index 44310c98ee6..9bd037df97d 100644 --- a/include/linux/mfd/abx500/ab8500-bm.h +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -422,7 +422,10 @@ struct ab8500_chargalg_platform_data {  struct ab8500_btemp;  struct ab8500_gpadc;  struct ab8500_fg; +  #ifdef CONFIG_AB8500_BM +extern struct abx500_bm_data ab8500_bm_data; +  void ab8500_fg_reinit(void);  void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);  struct ab8500_btemp *ab8500_btemp_get(void); @@ -434,31 +437,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);  int ab8500_fg_inst_curr_done(struct ab8500_fg *di);  #else -int ab8500_fg_inst_curr_done(struct ab8500_fg *di) -{ -} -static void ab8500_fg_reinit(void) -{ -} -static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA) -{ -} -static struct ab8500_btemp *ab8500_btemp_get(void) -{ -	return NULL; -} -static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp) -{ -	return 0; -} -struct ab8500_fg *ab8500_fg_get(void) -{ -	return NULL; -} -static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev) -{ -	return -ENODEV; -} +static struct abx500_bm_data ab8500_bm_data;  static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)  { diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index 86dd93de6ff..786d02eb79d 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -99,6 +99,9 @@ struct da9052 {  	u8 chip_id;  	int chip_irq; + +	/* SOC I/O transfer related fixes for DA9052/53 */ +	int (*fix_io) (struct da9052 *da9052, unsigned char reg);  };  /* ADC API */ @@ -113,32 +116,87 @@ static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)  	ret = regmap_read(da9052->regmap, reg, &val);  	if (ret < 0)  		return ret; + +	if (da9052->fix_io) { +		ret = da9052->fix_io(da9052, reg); +		if (ret < 0) +			return ret; +	} +  	return val;  }  static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,  				    unsigned char val)  { -	return regmap_write(da9052->regmap, reg, val); +	int ret; + +	ret = regmap_write(da9052->regmap, reg, val); +	if (ret < 0) +		return ret; + +	if (da9052->fix_io) { +		ret = da9052->fix_io(da9052, reg); +		if (ret < 0) +			return ret; +	} + +	return ret;  }  static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,  				     unsigned reg_cnt, unsigned char *val)  { -	return 
regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); +	int ret; + +	ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); +	if (ret < 0) +		return ret; + +	if (da9052->fix_io) { +		ret = da9052->fix_io(da9052, reg); +		if (ret < 0) +			return ret; +	} + +	return ret;  }  static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,  				      unsigned reg_cnt, unsigned char *val)  { -	return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); +	int ret; + +	ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt); +	if (ret < 0) +		return ret; + +	if (da9052->fix_io) { +		ret = da9052->fix_io(da9052, reg); +		if (ret < 0) +			return ret; +	} + +	return ret;  }  static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,  				     unsigned char bit_mask,  				     unsigned char reg_val)  { -	return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); +	int ret; + +	ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); +	if (ret < 0) +		return ret; + +	if (da9052->fix_io) { +		ret = da9052->fix_io(da9052, reg); +		if (ret < 0) +			return ret; +	} + +	return ret;  }  int da9052_device_init(struct da9052 *da9052, u8 chip_id); diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index b97f7309d7f..c4dd3a8add2 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -34,6 +34,9 @@  #define DA9052_STATUS_C_REG		3  #define DA9052_STATUS_D_REG		4 +/* PARK REGISTER */ +#define DA9052_PARK_REGISTER		DA9052_STATUS_D_REG +  /* EVENT REGISTERS */  #define DA9052_EVENT_A_REG		5  #define DA9052_EVENT_B_REG		6 diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h index a8d393e3066..2b13970596f 100644 --- a/include/linux/mfd/rtsx_common.h +++ b/include/linux/mfd/rtsx_common.h @@ -38,6 +38,9 @@  #define RTSX_SD_CARD			0  #define RTSX_MS_CARD			1 +#define CLK_TO_DIV_N			0 +#define DIV_N_TO_CLK			1 +  struct platform_device;  struct rtsx_slot { diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 060b721fcbf..4b117a3f54d 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -158,10 +158,9 @@  #define SG_TRANS_DATA		(0x02 << 4)  #define SG_LINK_DESC		(0x03 << 4) -/* SD bank voltage */ -#define SD_IO_3V3		0 -#define SD_IO_1V8		1 - +/* Output voltage */ +#define OUTPUT_3V3		0 +#define OUTPUT_1V8		1  /* Card Clock Enable Register */  #define SD_CLK_EN			0x04 @@ -201,6 +200,20 @@  #define CHANGE_CLK			0x01  /* LDO_CTL */ +#define BPP_ASIC_1V7			0x00 +#define BPP_ASIC_1V8			0x01 +#define BPP_ASIC_1V9			0x02 +#define BPP_ASIC_2V0			0x03 +#define BPP_ASIC_2V7			0x04 +#define BPP_ASIC_2V8			0x05 +#define BPP_ASIC_3V2			0x06 +#define BPP_ASIC_3V3			0x07 +#define BPP_REG_TUNED18			0x07 +#define BPP_TUNED18_SHIFT_8402		5 +#define BPP_TUNED18_SHIFT_8411		4 +#define BPP_PAD_MASK			0x04 +#define BPP_PAD_3V3			0x04 +#define BPP_PAD_1V8			0x00  #define BPP_LDO_POWB			0x03  #define BPP_LDO_ON			0x00  #define BPP_LDO_SUSPEND			0x02 @@ -688,7 +701,10 @@ struct pcr_ops {  	int		(*disable_auto_blink)(struct rtsx_pcr *pcr);  	int		(*card_power_on)(struct rtsx_pcr *pcr, int card);  	int		(*card_power_off)(struct rtsx_pcr *pcr, int card); +	int		(*switch_output_voltage)(struct rtsx_pcr *pcr, +						u8 voltage);  	unsigned int	(*cd_deglitch)(struct rtsx_pcr *pcr); +	int		(*conv_clk_and_div_n)(int clk, int dir);  };  enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN}; @@ -783,6 +799,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int 
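The da9052.h hunks above funnel every register access through the optional fix_io() callback, and reg.h now names a park register for that purpose. A sketch of the kind of hook a bus driver might install; the dummy-read workaround and the function name are assumptions for illustration, not taken from this patch:

	#include <linux/mfd/da9052/da9052.h>
	#include <linux/mfd/da9052/reg.h>
	#include <linux/regmap.h>

	static int da9052_park_fix(struct da9052 *da9052, unsigned char reg)
	{
		unsigned int val;

		/*
		 * Dummy read of the park register to leave the bus in a known
		 * state; calling regmap_read() directly avoids recursing back
		 * through da9052_reg_read().
		 */
		return regmap_read(da9052->regmap, DA9052_PARK_REGISTER, &val);
	}

The bus driver would then set da9052->fix_io = da9052_park_fix during probe, and the inline register helpers above invoke it after each regmap access.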
card_clock,  		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);  int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);  int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);  unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);  void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index bc823c4c028..deca8745252 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -151,7 +151,7 @@ struct mmu_notifier_ops {   * Therefore notifier chains can only be traversed when either   *   * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).   * 3. No other concurrent thread can access the list (release)   */  struct mmu_notifier { diff --git a/include/linux/module.h b/include/linux/module.h index 7760c6d344a..1375ee3f03a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -199,11 +199,11 @@ struct module_use {  	struct module *source, *target;  }; -enum module_state -{ -	MODULE_STATE_LIVE, -	MODULE_STATE_COMING, -	MODULE_STATE_GOING, +enum module_state { +	MODULE_STATE_LIVE,	/* Normal state. */ +	MODULE_STATE_COMING,	/* Full formed, running module_init. */ +	MODULE_STATE_GOING,	/* Going away. */ +	MODULE_STATE_UNFORMED,	/* Still setting it up. */  };  /** diff --git a/arch/arm/mach-imx/iram.h b/include/linux/platform_data/imx-iram.h index 022690c3370..022690c3370 100644 --- a/arch/arm/mach-imx/iram.h +++ b/include/linux/platform_data/imx-iram.h diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1693775ecfe..89573a33ab3 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request,  extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);  extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);  extern void ptrace_disable(struct task_struct *); -extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);  extern int ptrace_request(struct task_struct *child, long request,  			  unsigned long addr, unsigned long data);  extern void ptrace_notify(int exit_code); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 275aa3f1062..b758ce17b30 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -53,7 +53,10 @@ extern int rcutorture_runnable; /* for sysctl */  extern void rcutorture_record_test_transition(void);  extern void rcutorture_record_progress(unsigned long vernum);  extern void do_trace_rcu_torture_read(char *rcutorturename, -				      struct rcu_head *rhp); +				      struct rcu_head *rhp, +				      unsigned long secs, +				      unsigned long c_old, +				      unsigned long c);  #else  static inline void rcutorture_record_test_transition(void)  { @@ -63,9 +66,13 @@ static inline void rcutorture_record_progress(unsigned long vernum)  }  #ifdef CONFIG_RCU_TRACE  extern void do_trace_rcu_torture_read(char *rcutorturename, -				      struct rcu_head *rhp); +				      struct rcu_head *rhp, +				      unsigned long secs, +				      unsigned long c_old, +				      unsigned long c);  #else -#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, 
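A minimal usage sketch for the rtsx_pci_switch_output_voltage() helper and the OUTPUT_* constants added to rtsx_pci.h above; the surrounding function is illustrative only:

	#include <linux/mfd/rtsx_pci.h>

	static int sd_drop_to_1v8(struct rtsx_pcr *pcr)
	{
		/* dispatched to the chip-specific pcr_ops->switch_output_voltage() */
		return rtsx_pci_switch_output_voltage(pcr, OUTPUT_1V8);
	}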
c_old, c) \ +	do { } while (0)  #endif  #endif @@ -749,7 +756,7 @@ static inline void rcu_preempt_sleep_check(void)   * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)   * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may   * be preempted, but explicit blocking is illegal.  Finally, in preemptible - * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, + * RCU implementations in real-time (with -rt patchset) kernel builds,   * RCU read-side critical sections may be preempted and they may also   * block, but only when acquiring spinlocks that are subject to priority   * inheritance. diff --git a/include/linux/sched.h b/include/linux/sched.h index 6fc8f45de4e..d2112477ff5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2714,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)  extern void recalc_sigpending_and_wake(struct task_struct *t);  extern void recalc_sigpending(void); -extern void signal_wake_up(struct task_struct *t, int resume_stopped); +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ +	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ +	signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +}  /*   * Wrappers for p->thread_info->cpu access. No-op on UP. diff --git a/include/linux/security.h b/include/linux/security.h index 0f6afc657f7..eee7478cda7 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -989,17 +989,29 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)   *	tells the LSM to decrement the number of secmark labeling rules loaded   * @req_classify_flow:   *	Sets the flow's sid to the openreq sid. + * @tun_dev_alloc_security: + *	This hook allows a module to allocate a security structure for a TUN + *	device. + *	@security pointer to a security structure pointer. + *	Returns a zero on success, negative values on failure. + * @tun_dev_free_security: + *	This hook allows a module to free the security structure for a TUN + *	device. + *	@security pointer to the TUN device's security structure   * @tun_dev_create:   *	Check permissions prior to creating a new TUN device. - * @tun_dev_post_create: - *	This hook allows a module to update or allocate a per-socket security - *	structure. - *	@sk contains the newly created sock structure. + * @tun_dev_attach_queue: + *	Check permissions prior to attaching to a TUN device queue. + *	@security pointer to the TUN device's security structure.   * @tun_dev_attach: - *	Check permissions prior to attaching to a persistent TUN device.  This - *	hook can also be used by the module to update any security state + *	This hook can be used by the module to update any security state   *	associated with the TUN device's sock structure.   *	@sk contains the existing sock structure. + *	@security pointer to the TUN device's security structure. + * @tun_dev_open: + *	This hook can be used by the module to update any security state + *	associated with the TUN device's security structure. + *	@security pointer to the TUN devices's security structure.   *   * Security hooks for XFRM operations.   
* @@ -1620,9 +1632,12 @@ struct security_operations {  	void (*secmark_refcount_inc) (void);  	void (*secmark_refcount_dec) (void);  	void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); -	int (*tun_dev_create)(void); -	void (*tun_dev_post_create)(struct sock *sk); -	int (*tun_dev_attach)(struct sock *sk); +	int (*tun_dev_alloc_security) (void **security); +	void (*tun_dev_free_security) (void *security); +	int (*tun_dev_create) (void); +	int (*tun_dev_attach_queue) (void *security); +	int (*tun_dev_attach) (struct sock *sk, void *security); +	int (*tun_dev_open) (void *security);  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2566,9 +2581,12 @@ void security_inet_conn_established(struct sock *sk,  int security_secmark_relabel_packet(u32 secid);  void security_secmark_refcount_inc(void);  void security_secmark_refcount_dec(void); +int security_tun_dev_alloc_security(void **security); +void security_tun_dev_free_security(void *security);  int security_tun_dev_create(void); -void security_tun_dev_post_create(struct sock *sk); -int security_tun_dev_attach(struct sock *sk); +int security_tun_dev_attach_queue(void *security); +int security_tun_dev_attach(struct sock *sk, void *security); +int security_tun_dev_open(void *security);  #else	/* CONFIG_SECURITY_NETWORK */  static inline int security_unix_stream_connect(struct sock *sock, @@ -2733,16 +2751,31 @@ static inline void security_secmark_refcount_dec(void)  {  } +static inline int security_tun_dev_alloc_security(void **security) +{ +	return 0; +} + +static inline void security_tun_dev_free_security(void *security) +{ +} +  static inline int security_tun_dev_create(void)  {  	return 0;  } -static inline void security_tun_dev_post_create(struct sock *sk) +static inline int security_tun_dev_attach_queue(void *security) +{ +	return 0; +} + +static inline int security_tun_dev_attach(struct sock *sk, void *security)  { +	return 0;  } -static inline int security_tun_dev_attach(struct sock *sk) +static inline int security_tun_dev_open(void *security)  {  	return 0;  } diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 6eb691b0835..04f4121a23a 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp);   * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot   * and while lockdep is disabled.   * - * Note that if the CPU is in the idle loop from an RCU point of view - * (ie: that we are in the section between rcu_idle_enter() and - * rcu_idle_exit()) then srcu_read_lock_held() returns false even if - * the CPU did an srcu_read_lock().  The reason for this is that RCU - * ignores CPUs that are in such a section, considering these as in - * extended quiescent state, so such a CPU is effectively never in an - * RCU read-side critical section regardless of what RCU primitives it - * invokes.  This state of affairs is required --- we need to keep an - * RCU-free window in idle where the CPU may possibly enter into low - * power mode. This way we can notice an extended quiescent state to - * other CPUs that started a grace period. Otherwise we would delay any - * grace period as long as we run in the idle task. - * - * Similarly, we avoid claiming an SRCU read lock held if the current - * CPU is offline. + * Note that SRCU is based on its own statemachine and it doesn't + * relies on normal RCU, it can be called from the CPU which + * is in the idle loop from an RCU point of view or offline.   
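The security.h changes above replace the old tun_dev_post_create() hook with a per-device security blob plus separate create/attach-queue/attach/open checks. A rough sketch of the calling sequence a TUN-like driver would follow, pieced together from the prototypes above; struct my_tun_dev and the function bodies are illustrative assumptions, not code from this patch:

	#include <linux/security.h>
	#include <net/sock.h>

	struct my_tun_dev {
		struct sock *sk;
		void *security;		/* blob from security_tun_dev_alloc_security() */
	};

	static int my_tun_create(struct my_tun_dev *dev)
	{
		int err;

		err = security_tun_dev_alloc_security(&dev->security);
		if (err)
			return err;

		err = security_tun_dev_create();
		if (err)
			security_tun_dev_free_security(dev->security);
		return err;
	}

	static int my_tun_attach_queue(struct my_tun_dev *dev)
	{
		int err;

		/* permission check before attaching to an existing device's queue */
		err = security_tun_dev_attach_queue(dev->security);
		if (err)
			return err;

		/* let the LSM update the sock state for this device */
		return security_tun_dev_attach(dev->sk, dev->security);
	}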
*/  static inline int srcu_read_lock_held(struct srcu_struct *sp)  {  	if (!debug_lockdep_rcu_enabled())  		return 1; -	if (rcu_is_cpu_idle()) -		return 0; -	if (!rcu_lockdep_current_cpu_online()) -		return 0;  	return lock_is_held(&sp->dep_map);  } @@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)  	int retval = __srcu_read_lock(sp);  	rcu_lock_acquire(&(sp)->dep_map); -	rcu_lockdep_assert(!rcu_is_cpu_idle(), -			   "srcu_read_lock() used illegally while idle");  	return retval;  } @@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)  static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)  	__releases(sp)  { -	rcu_lockdep_assert(!rcu_is_cpu_idle(), -			   "srcu_read_unlock() used illegally while idle");  	rcu_lock_release(&(sp)->dep_map);  	__srcu_read_unlock(sp, idx);  } diff --git a/include/linux/usb.h b/include/linux/usb.h index 689b14b26c8..4d22d0f6167 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -357,6 +357,8 @@ struct usb_bus {  	int bandwidth_int_reqs;		/* number of Interrupt requests */  	int bandwidth_isoc_reqs;	/* number of Isoc. requests */ +	unsigned resuming_ports;	/* bit array: resuming root-hub ports */ +  #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)  	struct mon_bus *mon_bus;	/* non-null when associated */  	int monitored;			/* non-zero when monitored */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 608050b2545..0a78df5f6cf 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);  extern void usb_wakeup_notification(struct usb_device *hdev,  		unsigned int portnum); +extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); +extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); +  /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */  #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)  #define	usb_dotoggle(dev, ep, out)  ((dev)->toggle[out] ^= (1 << (ep))) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index bd45eb7bedc..0e5ac93bab1 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -33,6 +33,7 @@ struct usbnet {  	wait_queue_head_t	*wait;  	struct mutex		phy_mutex;  	unsigned char		suspend_count; +	unsigned char		pkt_cnt, pkt_err;  	/* i/o info: pipes etc */  	unsigned		in, out; @@ -70,6 +71,7 @@ struct usbnet {  #		define EVENT_DEV_OPEN	7  #		define EVENT_DEVICE_REPORT_IDLE	8  #		define EVENT_NO_RUNTIME_PM	9 +#		define EVENT_RX_KILL	10  };  static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -107,6 +109,7 @@ struct driver_info {   */  #define FLAG_MULTI_PACKET	0x2000  #define FLAG_RX_ASSEMBLE	0x4000	/* rx packets may span >1 frames */ +#define FLAG_NOARP		0x8000	/* device can't do ARP */  	/* init device ... 
can sleep, or cause probe() failure */  	int	(*bind)(struct usbnet *, struct usb_interface *); diff --git a/include/net/ip.h b/include/net/ip.h index 0707fb9551a..a68f838a132 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -143,6 +143,8 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)  extern int		ip4_datagram_connect(struct sock *sk,   					     struct sockaddr *uaddr, int addr_len); +extern void ip4_datagram_release_cb(struct sock *sk); +  struct ip_reply_arg {  	struct kvec iov[1];     	int	    flags; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index d8f5b9f5216..e98aeb3da03 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -31,6 +31,8 @@ extern void nf_conntrack_cleanup(struct net *net);  extern int nf_conntrack_proto_init(struct net *net);  extern void nf_conntrack_proto_fini(struct net *net); +extern void nf_conntrack_cleanup_end(void); +  extern bool  nf_ct_get_tuple(const struct sk_buff *skb,  		unsigned int nhoff, diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 498433dd067..938b7fd1120 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -34,17 +34,17 @@ extern int				udpv6_connect(struct sock *sk,  						      struct sockaddr *uaddr,  						      int addr_len); -extern int			datagram_recv_ctl(struct sock *sk, -						  struct msghdr *msg, -						  struct sk_buff *skb); +extern int			ip6_datagram_recv_ctl(struct sock *sk, +						      struct msghdr *msg, +						      struct sk_buff *skb); -extern int			datagram_send_ctl(struct net *net, -						  struct sock *sk, -						  struct msghdr *msg, -						  struct flowi6 *fl6, -						  struct ipv6_txoptions *opt, -						  int *hlimit, int *tclass, -						  int *dontfrag); +extern int			ip6_datagram_send_ctl(struct net *net, +						      struct sock *sk, +						      struct msghdr *msg, +						      struct flowi6 *fl6, +						      struct ipv6_txoptions *opt, +						      int *hlimit, int *tclass, +						      int *dontfrag);  #define		LOOPBACK4_IPV6		cpu_to_be32(0x7f000006) diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index d4f559b1ec3..1918e832da4 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -44,8 +44,10 @@ TRACE_EVENT(rcu_utilization,   * of a new grace period or the end of an old grace period ("cpustart"   * and "cpuend", respectively), a CPU passing through a quiescent   * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" - * and "cpuofl", respectively), and a CPU being kicked for being too - * long in dyntick-idle mode ("kick"). + * and "cpuofl", respectively), a CPU being kicked for being too + * long in dyntick-idle mode ("kick"), a CPU accelerating its new + * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU + * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").   
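+ * The two acceleration events come from rcu_accelerate_cbs() in
+ * kernel/rcutree.c, added later in this patch.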
*/  TRACE_EVENT(rcu_grace_period, @@ -393,7 +395,7 @@ TRACE_EVENT(rcu_kfree_callback,   */  TRACE_EVENT(rcu_batch_start, -	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit), +	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),  	TP_ARGS(rcuname, qlen_lazy, qlen, blimit), @@ -401,7 +403,7 @@ TRACE_EVENT(rcu_batch_start,  		__field(char *, rcuname)  		__field(long, qlen_lazy)  		__field(long, qlen) -		__field(int, blimit) +		__field(long, blimit)  	),  	TP_fast_assign( @@ -411,7 +413,7 @@ TRACE_EVENT(rcu_batch_start,  		__entry->blimit = blimit;  	), -	TP_printk("%s CBs=%ld/%ld bl=%d", +	TP_printk("%s CBs=%ld/%ld bl=%ld",  		  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,  		  __entry->blimit)  ); @@ -523,22 +525,30 @@ TRACE_EVENT(rcu_batch_end,   */  TRACE_EVENT(rcu_torture_read, -	TP_PROTO(char *rcutorturename, struct rcu_head *rhp), +	TP_PROTO(char *rcutorturename, struct rcu_head *rhp, +		 unsigned long secs, unsigned long c_old, unsigned long c), -	TP_ARGS(rcutorturename, rhp), +	TP_ARGS(rcutorturename, rhp, secs, c_old, c),  	TP_STRUCT__entry(  		__field(char *, rcutorturename)  		__field(struct rcu_head *, rhp) +		__field(unsigned long, secs) +		__field(unsigned long, c_old) +		__field(unsigned long, c)  	),  	TP_fast_assign(  		__entry->rcutorturename = rcutorturename;  		__entry->rhp = rhp; +		__entry->secs = secs; +		__entry->c_old = c_old; +		__entry->c = c;  	), -	TP_printk("%s torture read %p", -		  __entry->rcutorturename, __entry->rhp) +	TP_printk("%s torture read %p %luus c: %lu %lu", +		  __entry->rcutorturename, __entry->rhp, +		  __entry->secs, __entry->c_old, __entry->c)  );  /* @@ -608,7 +618,8 @@ TRACE_EVENT(rcu_barrier,  #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)  #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \  	do { } while (0) -#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ +	do { } while (0)  #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)  #endif /* #else #ifdef CONFIG_RCU_TRACE */ diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 77cdba9df27..bb991dfe134 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -28,25 +28,16 @@  #define AUTOFS_MIN_PROTO_VERSION	AUTOFS_PROTO_VERSION  /* - * Architectures where both 32- and 64-bit binaries can be executed - * on 64-bit kernels need this.  This keeps the structure format - * uniform, and makes sure the wait_queue_token isn't too big to be - * passed back down to the kernel. - * - * This assumes that on these architectures: - * mode     32 bit    64 bit - * ------------------------- - * int      32 bit    32 bit - * long     32 bit    64 bit - * - * If so, 32-bit user-space code should be backwards compatible. + * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed + * back to the kernel via ioctl from userspace. On architectures where 32- and + * 64-bit userspace binaries can be executed it's important that the size of + * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we + * do not break the binary ABI interface by changing the structure size.   
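+ *
+ * Hence the typedef below: unsigned long on the pure 64-bit
+ * architectures (ia64 and alpha), unsigned int everywhere else.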
*/ - -#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ - || defined(__powerpc__) || defined(__s390__) -typedef unsigned int autofs_wqt_t; -#else +#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */  typedef unsigned long autofs_wqt_t; +#else +typedef unsigned int autofs_wqt_t;  #endif  /* Packet types */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 78f99d97475..2c6c85f18ea 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -50,7 +50,8 @@  #define PORT_LPC3220	22	/* NXP LPC32xx SoC "Standard" UART */  #define PORT_8250_CIR	23	/* CIR infrared port, has its own driver */  #define PORT_XR17V35X	24	/* Exar XR17V35x UARTs */ -#define PORT_MAX_8250	24	/* max port ID */ +#define PORT_BRCM_TRUMANAGE	24 +#define PORT_MAX_8250	25	/* max port ID */  /*   * ARM specific type numbers.  These are not currently guaranteed diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 50598472dc4..f738e25377f 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -152,6 +152,12 @@  #define USB_INTRF_FUNC_SUSPEND_LP	(1 << (8 + 0))  #define USB_INTRF_FUNC_SUSPEND_RW	(1 << (8 + 1)) +/* + * Interface status, Figure 9-5 USB 3.0 spec + */ +#define USB_INTRF_STAT_FUNC_RW_CAP     1 +#define USB_INTRF_STAT_FUNC_RW         2 +  #define USB_ENDPOINT_HALT		0	/* IN/OUT will STALL */  /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ diff --git a/init/Kconfig b/init/Kconfig index a98e1acc122..dcb68ac42b7 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -449,7 +449,7 @@ config TREE_RCU  config TREE_PREEMPT_RCU  	bool "Preemptible tree-based hierarchical RCU" -	depends on PREEMPT && SMP +	depends on PREEMPT  	help  	  This option selects the RCU implementation that is  	  designed for very large SMP systems with hundreds or @@ -457,6 +457,8 @@ config TREE_PREEMPT_RCU  	  is also required.  It also scales down nicely to  	  smaller systems. +	  Select this option if you are unsure. +  config TINY_RCU  	bool "UP-only small-memory-footprint RCU"  	depends on !PREEMPT && !SMP @@ -482,6 +484,14 @@ config PREEMPT_RCU  	  This option enables preemptible-RCU code that is common between  	  the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. +config RCU_STALL_COMMON +	def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE ) +	help +	  This option enables RCU CPU stall code that is common between +	  the TINY and TREE variants of RCU.  The purpose is to allow +	  the tiny variants to disable RCU CPU stall warnings, while +	  making these warnings mandatory for the tree variants. 
+  config CONTEXT_TRACKING         bool diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 5e4ded51788..f9acf71b981 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd);  static int init_linuxrc(struct subprocess_info *info, struct cred *new)  {  	sys_unshare(CLONE_FS | CLONE_FILES); +	/* stdin/stdout/stderr for /linuxrc */ +	sys_open("/dev/console", O_RDWR, 0); +	sys_dup(0); +	sys_dup(0);  	/* move initrd over / and chdir/chroot in initrd root */  	sys_chdir("/root");  	sys_mount(".", "/", NULL, MS_MOVE, NULL); diff --git a/init/main.c b/init/main.c index 85d69dffe86..cee4b5c66d8 100644 --- a/init/main.c +++ b/init/main.c @@ -604,7 +604,7 @@ asmlinkage void __init start_kernel(void)  	pidmap_init();  	anon_vma_init();  #ifdef CONFIG_X86 -	if (efi_enabled) +	if (efi_enabled(EFI_RUNTIME_SERVICES))  		efi_enter_virtual_mode();  #endif  	thread_info_cache_init(); @@ -632,7 +632,7 @@ asmlinkage void __init start_kernel(void)  	acpi_early_init(); /* before LAPIC and SMP init */  	sfi_init_late(); -	if (efi_enabled) { +	if (efi_enabled(EFI_RUNTIME_SERVICES)) {  		efi_late_init();  		efi_free_boot_services();  	} @@ -802,7 +802,7 @@ static int run_init_process(const char *init_filename)  		(const char __user *const __user *)envp_init);  } -static void __init kernel_init_freeable(void); +static noinline void __init kernel_init_freeable(void);  static int __ref kernel_init(void *unused)  { @@ -845,7 +845,7 @@ static int __ref kernel_init(void *unused)  	      "See Linux Documentation/init.txt for guidance.");  } -static void __init kernel_init_freeable(void) +static noinline void __init kernel_init_freeable(void)  {  	/*  	 * Wait until kthreadd is all set-up. diff --git a/kernel/async.c b/kernel/async.c index a1d585c351d..6f34904a0b5 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -86,18 +86,27 @@ static atomic_t entry_count;   */  static async_cookie_t  __lowest_in_progress(struct async_domain *running)  { +	async_cookie_t first_running = next_cookie;	/* infinity value */ +	async_cookie_t first_pending = next_cookie;	/* ditto */  	struct async_entry *entry; +	/* +	 * Both running and pending lists are sorted but not disjoint. +	 * Take the first cookies from both and return the min. 
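+	 *
+	 * For example, with the running list starting at cookie 12 and the
+	 * first pending entry for this domain carrying cookie 9, the lowest
+	 * cookie still in progress is min(12, 9) == 9.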
+	 */  	if (!list_empty(&running->domain)) {  		entry = list_first_entry(&running->domain, typeof(*entry), list); -		return entry->cookie; +		first_running = entry->cookie;  	} -	list_for_each_entry(entry, &async_pending, list) -		if (entry->running == running) -			return entry->cookie; +	list_for_each_entry(entry, &async_pending, list) { +		if (entry->running == running) { +			first_pending = entry->cookie; +			break; +		} +	} -	return next_cookie;	/* "infinity" value */ +	return min(first_running, first_pending);  }  static async_cookie_t  lowest_in_progress(struct async_domain *running) @@ -118,13 +127,17 @@ static void async_run_entry_fn(struct work_struct *work)  {  	struct async_entry *entry =  		container_of(work, struct async_entry, work); +	struct async_entry *pos;  	unsigned long flags;  	ktime_t uninitialized_var(calltime), delta, rettime;  	struct async_domain *running = entry->running; -	/* 1) move self to the running queue */ +	/* 1) move self to the running queue, make sure it stays sorted */  	spin_lock_irqsave(&async_lock, flags); -	list_move_tail(&entry->list, &running->domain); +	list_for_each_entry_reverse(pos, &running->domain, list) +		if (entry->cookie < pos->cookie) +			break; +	list_move_tail(&entry->list, &pos->list);  	spin_unlock_irqrestore(&async_lock, flags);  	/* 2) run (and print duration) */ diff --git a/kernel/compat.c b/kernel/compat.c index f6150e92dfc..36700e9e2be 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)  	return 0;  } -asmlinkage long -compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, -	struct compat_rusage __user *ru) +COMPAT_SYSCALL_DEFINE4(wait4, +	compat_pid_t, pid, +	compat_uint_t __user *, stat_addr, +	int, options, +	struct compat_rusage __user *, ru)  {  	if (!ru) {  		return sys_wait4(pid, stat_addr, options, NULL); @@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,  	}  } -asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, -		struct compat_siginfo __user *uinfo, int options, -		struct compat_rusage __user *uru) +COMPAT_SYSCALL_DEFINE5(waitid, +		int, which, compat_pid_t, pid, +		struct compat_siginfo __user *, uinfo, int, options, +		struct compat_rusage __user *, uru)  {  	siginfo_t info;  	struct rusage ru; @@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,  		return ret;  	if (uru) { -		ret = put_compat_rusage(&ru, uru); +		/* sys_waitid() overwrites everything in ru */ +		if (COMPAT_USE_64BIT_TIME) +			ret = copy_to_user(uru, &ru, sizeof(ru)); +		else +			ret = put_compat_rusage(&ru, uru);  		if (ret)  			return ret;  	} @@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,  	sigset_from_compat(&s, &s32);  	if (uts) { -		if (get_compat_timespec(&t, uts)) +		if (compat_get_timespec(&t, uts))  			return -EFAULT;  	} diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index e0e07fd5550..d566aba7e80 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -1,3 +1,19 @@ +/* + * Context tracking: Probe on high level context boundaries such as kernel + * and userspace. This includes syscalls and exceptions entry/exit. + * + * This is used by RCU to remove its dependency on the timer tick while a CPU + * runs in userspace. 
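+ *
+ * The probes are user_enter(), called right before the switch back to
+ * userspace, and user_exit(), called on entry into the kernel from
+ * userspace before any RCU read-side use.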
+ * + *  Started by Frederic Weisbecker: + * + * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com> + * + * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton, + * Steven Rostedt, Peter Zijlstra for suggestions and improvements. + * + */ +  #include <linux/context_tracking.h>  #include <linux/rcupdate.h>  #include <linux/sched.h> @@ -6,8 +22,8 @@  struct context_tracking {  	/* -	 * When active is false, hooks are not set to -	 * minimize overhead: TIF flags are cleared +	 * When active is false, probes are unset in order +	 * to minimize overhead: TIF flags are cleared  	 * and calls to user_enter/exit are ignored. This  	 * may be further optimized using static keys.  	 */ @@ -24,6 +40,15 @@ static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {  #endif  }; +/** + * user_enter - Inform the context tracking that the CPU is going to + *              enter userspace mode. + * + * This function must be called right before we switch from the kernel + * to userspace, when it's guaranteed the remaining kernel instructions + * to execute won't use any RCU read side critical section because this + * function sets RCU in extended quiescent state. + */  void user_enter(void)  {  	unsigned long flags; @@ -39,40 +64,70 @@ void user_enter(void)  	if (in_interrupt())  		return; +	/* Kernel threads aren't supposed to go to userspace */  	WARN_ON_ONCE(!current->mm);  	local_irq_save(flags);  	if (__this_cpu_read(context_tracking.active) &&  	    __this_cpu_read(context_tracking.state) != IN_USER) {  		__this_cpu_write(context_tracking.state, IN_USER); +		/* +		 * At this stage, only low level arch entry code remains and +		 * then we'll run in userspace. We can assume there won't be +		 * any RCU read-side critical section until the next call to +		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency +		 * on the tick. +		 */  		rcu_user_enter();  	}  	local_irq_restore(flags);  } + +/** + * user_exit - Inform the context tracking that the CPU is + *             exiting userspace mode and entering the kernel. + * + * This function must be called after we entered the kernel from userspace + * before any use of RCU read side critical section. This potentially include + * any high level kernel code like syscalls, exceptions, signal handling, etc... + * + * This call supports re-entrancy. This way it can be called from any exception + * handler without needing to know if we came from userspace or not. + */  void user_exit(void)  {  	unsigned long flags; -	/* -	 * Some contexts may involve an exception occuring in an irq, -	 * leading to that nesting: -	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() -	 * This would mess up the dyntick_nesting count though. And rcu_irq_*() -	 * helpers are enough to protect RCU uses inside the exception. So -	 * just return immediately if we detect we are in an IRQ. -	 */  	if (in_interrupt())  		return;  	local_irq_save(flags);  	if (__this_cpu_read(context_tracking.state) == IN_USER) {  		__this_cpu_write(context_tracking.state, IN_KERNEL); +		/* +		 * We are going to run code that may use RCU. Inform +		 * RCU core about that (ie: we may need the tick again). 
+		 */  		rcu_user_exit();  	}  	local_irq_restore(flags);  } + +/** + * context_tracking_task_switch - context switch the syscall callbacks + * @prev: the task that is being switched out + * @next: the task that is being switched in + * + * The context tracking uses the syscall slow path to implement its user-kernel + * boundaries probes on syscalls. This way it doesn't impact the syscall fast + * path on CPUs that don't do context tracking. + * + * But we need to clear the flag on the previous task because it may later + * migrate to some CPU that doesn't do the context tracking. As such the TIF + * flag may not be desired there. + */  void context_tracking_task_switch(struct task_struct *prev,  			     struct task_struct *next)  { diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4d5f8d5612f..8875254120b 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)  	kdb_printf("Module                  Size  modstruct     Used by\n");  	list_for_each_entry(mod, kdb_modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		kdb_printf("%-20s%8u  0x%p ", mod->name,  			   mod->core_size, (void *)mod); diff --git a/kernel/events/core.c b/kernel/events/core.c index 301079d06f2..7b6646a8c06 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)  }  /* + * Initialize event state based on the perf_event_attr::disabled. + */ +static inline void perf_event__state_init(struct perf_event *event) +{ +	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : +					      PERF_EVENT_STATE_INACTIVE; +} + +/*   * Called at perf_event creation and when events are attached/detached from a   * group.   */ @@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,  	event->overflow_handler	= overflow_handler;  	event->overflow_handler_context = context; -	if (attr->disabled) -		event->state = PERF_EVENT_STATE_OFF; +	perf_event__state_init(event);  	pmu = NULL; @@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,  		mutex_lock(&gctx->mutex);  		perf_remove_from_context(group_leader); + +		/* +		 * Removing from the context ends up with disabled +		 * event. What we want here is event in the initial +		 * startup state, ready to be add into new context. +		 */ +		perf_event__state_init(group_leader);  		list_for_each_entry(sibling, &group_leader->sibling_list,  				    group_entry) {  			perf_remove_from_context(sibling); +			perf_event__state_init(sibling);  			put_ctx(gctx);  		}  		mutex_unlock(&gctx->mutex); diff --git a/kernel/fork.c b/kernel/fork.c index 65ca6d27f24..c535f33bbb9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1668,8 +1668,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,  		 int, tls_val)  #endif  { -	return do_fork(clone_flags, newsp, 0, -		parent_tidptr, child_tidptr); +	long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr); +	asmlinkage_protect(5, ret, clone_flags, newsp, +			parent_tidptr, child_tidptr, tls_val); +	return ret;  }  #endif diff --git a/kernel/module.c b/kernel/module.c index b10b048367e..eab08274ec9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -188,6 +188,7 @@ struct load_info {     ongoing or failed initialization etc. 
*/  static inline int strong_try_module_get(struct module *mod)  { +	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);  	if (mod && mod->state == MODULE_STATE_COMING)  		return -EBUSY;  	if (try_module_get(mod)) @@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,  #endif  		}; +		if (mod->state == MODULE_STATE_UNFORMED) +			continue; +  		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))  			return true;  	} @@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,  EXPORT_SYMBOL_GPL(find_symbol);  /* Search for module by name: must hold module_mutex. */ -struct module *find_module(const char *name) +static struct module *find_module_all(const char *name, +				      bool even_unformed)  {  	struct module *mod;  	list_for_each_entry(mod, &modules, list) { +		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (strcmp(mod->name, name) == 0)  			return mod;  	}  	return NULL;  } + +struct module *find_module(const char *name) +{ +	return find_module_all(name, false); +}  EXPORT_SYMBOL_GPL(find_module);  #ifdef CONFIG_SMP @@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (!mod->percpu_size)  			continue;  		for_each_possible_cpu(cpu) { @@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,  	case MODULE_STATE_GOING:  		state = "going";  		break; +	default: +		BUG();  	}  	return sprintf(buffer, "%s\n", state);  } @@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)  	mutex_lock(&module_mutex);  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if ((mod->module_core) && (mod->core_text_size)) {  			set_page_attributes(mod->module_core,  						mod->module_core + mod->core_text_size, @@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)  	mutex_lock(&module_mutex);  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if ((mod->module_core) && (mod->core_text_size)) {  			set_page_attributes(mod->module_core,  						mod->module_core + mod->core_text_size, @@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)  		err = -EFBIG;  		goto out;  	} + +	/* Don't hand 0 to vmalloc, it whines. */ +	if (stat.size == 0) { +		err = -EINVAL; +		goto out; +	} +  	info->hdr = vmalloc(stat.size);  	if (!info->hdr) {  		err = -ENOMEM; @@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)  	bool ret;  	mutex_lock(&module_mutex); -	mod = find_module(name); -	ret = !mod || mod->state != MODULE_STATE_COMING; +	mod = find_module_all(name, true); +	ret = !mod || mod->state == MODULE_STATE_LIVE +		|| mod->state == MODULE_STATE_GOING;  	mutex_unlock(&module_mutex);  	return ret; @@ -3136,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,  		goto free_copy;  	} +	/* +	 * We try to place it in the list now to make sure it's unique +	 * before we dedicate too many resources.  In particular, +	 * temporary percpu memory exhaustion. +	 */ +	mod->state = MODULE_STATE_UNFORMED; +again: +	mutex_lock(&module_mutex); +	if ((old = find_module_all(mod->name, true)) != NULL) { +		if (old->state == MODULE_STATE_COMING +		    || old->state == MODULE_STATE_UNFORMED) { +			/* Wait in case it fails to load. 
*/ +			mutex_unlock(&module_mutex); +			err = wait_event_interruptible(module_wq, +					       finished_loading(mod->name)); +			if (err) +				goto free_module; +			goto again; +		} +		err = -EEXIST; +		mutex_unlock(&module_mutex); +		goto free_module; +	} +	list_add_rcu(&mod->list, &modules); +	mutex_unlock(&module_mutex); +  #ifdef CONFIG_MODULE_SIG  	mod->sig_ok = info->sig_ok;  	if (!mod->sig_ok) @@ -3145,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,  	/* Now module is in final location, initialize linked lists, etc. */  	err = module_unload_init(mod);  	if (err) -		goto free_module; +		goto unlink_mod;  	/* Now we've got everything in the final locations, we can  	 * find optional sections. */ @@ -3180,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,  		goto free_arch_cleanup;  	} -	/* Mark state as coming so strong_try_module_get() ignores us. */ -	mod->state = MODULE_STATE_COMING; - -	/* Now sew it into the lists so we can get lockdep and oops -	 * info during argument parsing.  No one should access us, since -	 * strong_try_module_get() will fail. -	 * lockdep/oops can run asynchronous, so use the RCU list insertion -	 * function to insert in a way safe to concurrent readers. -	 * The mutex protects against concurrent writers. -	 */ -again: -	mutex_lock(&module_mutex); -	if ((old = find_module(mod->name)) != NULL) { -		if (old->state == MODULE_STATE_COMING) { -			/* Wait in case it fails to load. */ -			mutex_unlock(&module_mutex); -			err = wait_event_interruptible(module_wq, -					       finished_loading(mod->name)); -			if (err) -				goto free_arch_cleanup; -			goto again; -		} -		err = -EEXIST; -		goto unlock; -	} - -	/* This has to be done once we're sure module name is unique. */  	dynamic_debug_setup(info->debug, info->num_debug); -	/* Find duplicate symbols */ +	mutex_lock(&module_mutex); +	/* Find duplicate symbols (must be called under lock). */  	err = verify_export_symbols(mod);  	if (err < 0) -		goto ddebug; +		goto ddebug_cleanup; +	/* This relies on module_mutex for list integrity. */  	module_bug_finalize(info->hdr, info->sechdrs, mod); -	list_add_rcu(&mod->list, &modules); + +	/* Mark state as coming so strong_try_module_get() ignores us, +	 * but kallsyms etc. can see us. */ +	mod->state = MODULE_STATE_COMING; +  	mutex_unlock(&module_mutex);  	/* Module is ready to execute: parsing args may do that. */  	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,  			 -32768, 32767, &ddebug_dyndbg_module_param_cb);  	if (err < 0) -		goto unlink; +		goto bug_cleanup;  	/* Link in to syfs. */  	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);  	if (err < 0) -		goto unlink; +		goto bug_cleanup;  	/* Get rid of temporary copy. */  	free_copy(info); @@ -3237,16 +3270,13 @@ again:  	return do_init_module(mod); - unlink: + bug_cleanup: +	/* module_bug_cleanup needs module_mutex protection */  	mutex_lock(&module_mutex); -	/* Unlink carefully: kallsyms could be walking list. */ -	list_del_rcu(&mod->list);  	module_bug_cleanup(mod); -	wake_up_all(&module_wq); - ddebug: -	dynamic_debug_remove(info->debug); - unlock: + ddebug_cleanup:  	mutex_unlock(&module_mutex); +	dynamic_debug_remove(info->debug);  	synchronize_sched();  	kfree(mod->args);   free_arch_cleanup: @@ -3255,6 +3285,12 @@ again:  	free_modinfo(mod);   free_unload:  	module_unload_free(mod); + unlink_mod: +	mutex_lock(&module_mutex); +	/* Unlink carefully: kallsyms could be walking list. 
*/ +	list_del_rcu(&mod->list); +	wake_up_all(&module_wq); +	mutex_unlock(&module_mutex);   free_module:  	module_deallocate(mod, info);   free_copy: @@ -3377,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			if (modname) @@ -3400,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			const char *sym; @@ -3424,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			const char *sym; @@ -3451,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (symnum < mod->num_symtab) {  			*value = mod->symtab[symnum].st_value;  			*type = mod->symtab[symnum].st_info; @@ -3493,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)  			ret = mod_find_symname(mod, colon+1);  		*colon = ':';  	} else { -		list_for_each_entry_rcu(mod, &modules, list) +		list_for_each_entry_rcu(mod, &modules, list) { +			if (mod->state == MODULE_STATE_UNFORMED) +				continue;  			if ((ret = mod_find_symname(mod, name)) != 0)  				break; +		}  	}  	preempt_enable();  	return ret; @@ -3510,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,  	int ret;  	list_for_each_entry(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		for (i = 0; i < mod->num_symtab; i++) {  			ret = fn(data, mod->strtab + mod->symtab[i].st_name,  				 mod, mod->symtab[i].st_value); @@ -3525,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)  {  	int bx = 0; +	BUG_ON(mod->state == MODULE_STATE_UNFORMED);  	if (mod->taints ||  	    mod->state == MODULE_STATE_GOING ||  	    mod->state == MODULE_STATE_COMING) { @@ -3566,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)  	struct module *mod = list_entry(p, struct module, list);  	char buf[8]; +	/* We always ignore unformed modules. 
*/ +	if (mod->state == MODULE_STATE_UNFORMED) +		return 0; +  	seq_printf(m, "%s %u",  		   mod->name, mod->init_size + mod->core_size);  	print_unload_info(m, mod); @@ -3626,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (mod->num_exentries == 0)  			continue; @@ -3674,10 +3730,13 @@ struct module *__module_address(unsigned long addr)  	if (addr < module_addr_min || addr > module_addr_max)  		return NULL; -	list_for_each_entry_rcu(mod, &modules, list) +	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_core(addr, mod)  		    || within_module_init(addr, mod))  			return mod; +	}  	return NULL;  }  EXPORT_SYMBOL_GPL(__module_address); @@ -3730,8 +3789,11 @@ void print_modules(void)  	printk(KERN_DEFAULT "Modules linked in:");  	/* Most callers should already have preempt disabled, but make sure */  	preempt_disable(); -	list_for_each_entry_rcu(mod, &modules, list) +	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		printk(" %s%s", mod->name, module_flags(mod, buf)); +	}  	preempt_enable();  	if (last_unloaded_module[0])  		printk(" [last unloaded: %s]", last_unloaded_module); diff --git a/kernel/pid.c b/kernel/pid.c index de9af600006..f2c6a682509 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -331,7 +331,7 @@ out:  	return pid;  out_unlock: -	spin_unlock(&pidmap_lock); +	spin_unlock_irq(&pidmap_lock);  out_free:  	while (++i <= ns->level)  		free_pidmap(pid->numbers + i); diff --git a/kernel/printk.c b/kernel/printk.c index 0b31715f335..f24633afa46 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -88,12 +88,6 @@ static DEFINE_SEMAPHORE(console_sem);  struct console *console_drivers;  EXPORT_SYMBOL_GPL(console_drivers); -#ifdef CONFIG_LOCKDEP -static struct lockdep_map console_lock_dep_map = { -	.name = "console_lock" -}; -#endif -  /*   * This is used for debugging the mess that is the VT code by   * keeping track if we have the console semaphore held. It's @@ -1925,7 +1919,6 @@ void console_lock(void)  		return;  	console_locked = 1;  	console_may_schedule = 1; -	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);  }  EXPORT_SYMBOL(console_lock); @@ -1947,7 +1940,6 @@ int console_trylock(void)  	}  	console_locked = 1;  	console_may_schedule = 0; -	mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);  	return 1;  }  EXPORT_SYMBOL(console_trylock); @@ -2110,7 +2102,6 @@ skip:  		local_irq_restore(flags);  	}  	console_locked = 0; -	mutex_release(&console_lock_dep_map, 1, _RET_IP_);  	/* Release the exclusive_console once it is used */  	if (unlikely(exclusive_console)) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1599157336a..6cbeaae4406 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)  	 * TASK_KILLABLE sleeps.  	 
*/  	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) -		signal_wake_up(child, task_is_traced(child)); +		ptrace_signal_wake_up(child, true);  	spin_unlock(&child->sighand->siglock);  } +/* Ensure that nothing can wake it up, even SIGKILL */ +static bool ptrace_freeze_traced(struct task_struct *task) +{ +	bool ret = false; + +	/* Lockless, nobody but us can set this flag */ +	if (task->jobctl & JOBCTL_LISTENING) +		return ret; + +	spin_lock_irq(&task->sighand->siglock); +	if (task_is_traced(task) && !__fatal_signal_pending(task)) { +		task->state = __TASK_TRACED; +		ret = true; +	} +	spin_unlock_irq(&task->sighand->siglock); + +	return ret; +} + +static void ptrace_unfreeze_traced(struct task_struct *task) +{ +	if (task->state != __TASK_TRACED) +		return; + +	WARN_ON(!task->ptrace || task->parent != current); + +	spin_lock_irq(&task->sighand->siglock); +	if (__fatal_signal_pending(task)) +		wake_up_state(task, __TASK_TRACED); +	else +		task->state = TASK_TRACED; +	spin_unlock_irq(&task->sighand->siglock); +} +  /**   * ptrace_check_attach - check whether ptracee is ready for ptrace operation   * @child: ptracee to check for @@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)   * RETURNS:   * 0 on success, -ESRCH if %child is not ready.   */ -int ptrace_check_attach(struct task_struct *child, bool ignore_state) +static int ptrace_check_attach(struct task_struct *child, bool ignore_state)  {  	int ret = -ESRCH; @@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)  	 * be changed by us so it's not changing right after this.  	 */  	read_lock(&tasklist_lock); -	if ((child->ptrace & PT_PTRACED) && child->parent == current) { +	if (child->ptrace && child->parent == current) { +		WARN_ON(child->state == __TASK_TRACED);  		/*  		 * child->sighand can't be NULL, release_task()  		 * does ptrace_unlink() before __exit_signal().  		 */ -		spin_lock_irq(&child->sighand->siglock); -		WARN_ON_ONCE(task_is_stopped(child)); -		if (ignore_state || (task_is_traced(child) && -				     !(child->jobctl & JOBCTL_LISTENING))) +		if (ignore_state || ptrace_freeze_traced(child))  			ret = 0; -		spin_unlock_irq(&child->sighand->siglock);  	}  	read_unlock(&tasklist_lock); -	if (!ret && !ignore_state) -		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; +	if (!ret && !ignore_state) { +		if (!wait_task_inactive(child, __TASK_TRACED)) { +			/* +			 * This can only happen if may_ptrace_stop() fails and +			 * ptrace_stop() changes ->state back to TASK_RUNNING, +			 * so we should not worry about leaking __TASK_TRACED. +			 */ +			WARN_ON(child->state == __TASK_TRACED); +			ret = -ESRCH; +		} +	} -	/* All systems go.. */  	return ret;  } @@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,  	 */  	if (task_is_stopped(task) &&  	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) -		signal_wake_up(task, 1); +		signal_wake_up_state(task, __TASK_STOPPED);  	spin_unlock(&task->sighand->siglock); @@ -737,7 +776,7 @@ int ptrace_request(struct task_struct *child, long request,  		 * tracee into STOP.  		 */  		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) -			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); +			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);  		unlock_task_sighand(child, &flags);  		ret = 0; @@ -763,7 +802,7 @@ int ptrace_request(struct task_struct *child, long request,  			 * start of this trap and now.  Trigger re-trap.  			 
*/  			if (child->jobctl & JOBCTL_TRAP_NOTIFY) -				signal_wake_up(child, true); +				ptrace_signal_wake_up(child, true);  			ret = 0;  		}  		unlock_task_sighand(child, &flags); @@ -900,6 +939,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,  		goto out_put_task_struct;  	ret = arch_ptrace(child, request, addr, data); +	if (ret || request != PTRACE_DETACH) +		ptrace_unfreeze_traced(child);   out_put_task_struct:  	put_task_struct(child); @@ -1039,8 +1080,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,  	ret = ptrace_check_attach(child, request == PTRACE_KILL ||  				  request == PTRACE_INTERRUPT); -	if (!ret) +	if (!ret) {  		ret = compat_arch_ptrace(child, request, addr, data); +		if (ret || request != PTRACE_DETACH) +			ptrace_unfreeze_traced(child); +	}   out_put_task_struct:  	put_task_struct(child); diff --git a/kernel/rcu.h b/kernel/rcu.h index 20dfba576c2..7f8e7590e3e 100644 --- a/kernel/rcu.h +++ b/kernel/rcu.h @@ -111,4 +111,11 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)  extern int rcu_expedited; +#ifdef CONFIG_RCU_STALL_COMMON + +extern int rcu_cpu_stall_suppress; +int rcu_jiffies_till_stall_check(void); + +#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ +  #endif /* __LINUX_RCU_H */ diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index a2cf76177b4..48ab70384a4 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -404,11 +404,65 @@ EXPORT_SYMBOL_GPL(rcuhead_debug_descr);  #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */  #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE) -void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp) +void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp, +			       unsigned long secs, +			       unsigned long c_old, unsigned long c)  { -	trace_rcu_torture_read(rcutorturename, rhp); +	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);  }  EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);  #else -#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ +	do { } while (0)  #endif + +#ifdef CONFIG_RCU_STALL_COMMON + +#ifdef CONFIG_PROVE_RCU +#define RCU_STALL_DELAY_DELTA	       (5 * HZ) +#else +#define RCU_STALL_DELAY_DELTA	       0 +#endif + +int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ +int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; + +module_param(rcu_cpu_stall_suppress, int, 0644); +module_param(rcu_cpu_stall_timeout, int, 0644); + +int rcu_jiffies_till_stall_check(void) +{ +	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout); + +	/* +	 * Limit check must be consistent with the Kconfig limits +	 * for CONFIG_RCU_CPU_STALL_TIMEOUT. 
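+	 *
+	 * For example, a rcu_cpu_stall_timeout of 500 is clamped to 300
+	 * below, so stalls would not be reported until 300 * HZ jiffies
+	 * (plus RCU_STALL_DELAY_DELTA under PROVE_RCU) into the grace
+	 * period.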
+	 */ +	if (till_stall_check < 3) { +		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; +		till_stall_check = 3; +	} else if (till_stall_check > 300) { +		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; +		till_stall_check = 300; +	} +	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; +} + +static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) +{ +	rcu_cpu_stall_suppress = 1; +	return NOTIFY_DONE; +} + +static struct notifier_block rcu_panic_block = { +	.notifier_call = rcu_panic, +}; + +static int __init check_cpu_stall_init(void) +{ +	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); +	return 0; +} +early_initcall(check_cpu_stall_init); + +#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index e7dce58f9c2..a0714a51b6d 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c @@ -51,10 +51,10 @@ static void __call_rcu(struct rcu_head *head,  		       void (*func)(struct rcu_head *rcu),  		       struct rcu_ctrlblk *rcp); -#include "rcutiny_plugin.h" -  static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; +#include "rcutiny_plugin.h" +  /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */  static void rcu_idle_enter_common(long long newval)  { @@ -193,7 +193,7 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);   * interrupts don't count, we must be running at the first interrupt   * level.   */ -int rcu_is_cpu_rrupt_from_idle(void) +static int rcu_is_cpu_rrupt_from_idle(void)  {  	return rcu_dynticks_nesting <= 1;  } @@ -205,6 +205,7 @@ int rcu_is_cpu_rrupt_from_idle(void)   */  static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)  { +	reset_cpu_stall_ticks(rcp);  	if (rcp->rcucblist != NULL &&  	    rcp->donetail != rcp->curtail) {  		rcp->donetail = rcp->curtail; @@ -251,6 +252,7 @@ void rcu_bh_qs(int cpu)   */  void rcu_check_callbacks(int cpu, int user)  { +	check_cpu_stalls();  	if (user || rcu_is_cpu_rrupt_from_idle())  		rcu_sched_qs(cpu);  	else if (!in_softirq()) diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index f85016a2309..8a233002fae 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h @@ -33,6 +33,9 @@ struct rcu_ctrlblk {  	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */  	struct rcu_head **curtail;	/* ->next pointer of last CB. */  	RCU_TRACE(long qlen);		/* Number of pending CBs. */ +	RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */ +	RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */ +	RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */  	RCU_TRACE(char *name);		/* Name of RCU type. 
*/  }; @@ -54,6 +57,51 @@ int rcu_scheduler_active __read_mostly;  EXPORT_SYMBOL_GPL(rcu_scheduler_active);  #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +#ifdef CONFIG_RCU_TRACE + +static void check_cpu_stall(struct rcu_ctrlblk *rcp) +{ +	unsigned long j; +	unsigned long js; + +	if (rcu_cpu_stall_suppress) +		return; +	rcp->ticks_this_gp++; +	j = jiffies; +	js = rcp->jiffies_stall; +	if (*rcp->curtail && ULONG_CMP_GE(j, js)) { +		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", +		       rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, +		       jiffies - rcp->gp_start, rcp->qlen); +		dump_stack(); +	} +	if (*rcp->curtail && ULONG_CMP_GE(j, js)) +		rcp->jiffies_stall = jiffies + +			3 * rcu_jiffies_till_stall_check() + 3; +	else if (ULONG_CMP_GE(j, js)) +		rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check(); +} + +static void check_cpu_stall_preempt(void); + +#endif /* #ifdef CONFIG_RCU_TRACE */ + +static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) +{ +#ifdef CONFIG_RCU_TRACE +	rcp->ticks_this_gp = 0; +	rcp->gp_start = jiffies; +	rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check(); +#endif /* #ifdef CONFIG_RCU_TRACE */ +} + +static void check_cpu_stalls(void) +{ +	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk)); +	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk)); +	RCU_TRACE(check_cpu_stall_preempt()); +} +  #ifdef CONFIG_TINY_PREEMPT_RCU  #include <linux/delay.h> @@ -448,6 +496,7 @@ static void rcu_preempt_start_gp(void)  		/* Official start of GP. */  		rcu_preempt_ctrlblk.gpnum++;  		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++); +		reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);  		/* Any blocked RCU readers block new GP. */  		if (rcu_preempt_blocked_readers_any()) @@ -1054,4 +1103,11 @@ MODULE_AUTHOR("Paul E. McKenney");  MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");  MODULE_LICENSE("GPL"); +static void check_cpu_stall_preempt(void) +{ +#ifdef CONFIG_TINY_PREEMPT_RCU +	check_cpu_stall(&rcu_preempt_ctrlblk.rcb); +#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */ +} +  #endif /* #ifdef CONFIG_RCU_TRACE */ diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 31dea01c85f..e1f3a8c9672 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -46,6 +46,7 @@  #include <linux/stat.h>  #include <linux/srcu.h>  #include <linux/slab.h> +#include <linux/trace_clock.h>  #include <asm/byteorder.h>  MODULE_LICENSE("GPL"); @@ -207,6 +208,20 @@ MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");  #define rcu_can_boost() 0  #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ +#ifdef CONFIG_RCU_TRACE +static u64 notrace rcu_trace_clock_local(void) +{ +	u64 ts = trace_clock_local(); +	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC); +	return ts; +} +#else /* #ifdef CONFIG_RCU_TRACE */ +static u64 notrace rcu_trace_clock_local(void) +{ +	return 0ULL; +} +#endif /* #else #ifdef CONFIG_RCU_TRACE */ +  static unsigned long shutdown_time;	/* jiffies to system shutdown. */  static unsigned long boost_starttime;	/* jiffies of next boost test start. */  DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */ @@ -845,7 +860,7 @@ static int rcu_torture_boost(void *arg)  		/* Wait for the next test interval. 
*/  		oldstarttime = boost_starttime;  		while (ULONG_CMP_LT(jiffies, oldstarttime)) { -			schedule_timeout_uninterruptible(1); +			schedule_timeout_interruptible(oldstarttime - jiffies);  			rcu_stutter_wait("rcu_torture_boost");  			if (kthread_should_stop() ||  			    fullstop != FULLSTOP_DONTSTOP) @@ -1028,7 +1043,6 @@ void rcutorture_trace_dump(void)  		return;  	if (atomic_xchg(&beenhere, 1) != 0)  		return; -	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);  	ftrace_dump(DUMP_ALL);  } @@ -1042,13 +1056,16 @@ static void rcu_torture_timer(unsigned long unused)  {  	int idx;  	int completed; +	int completed_end;  	static DEFINE_RCU_RANDOM(rand);  	static DEFINE_SPINLOCK(rand_lock);  	struct rcu_torture *p;  	int pipe_count; +	unsigned long long ts;  	idx = cur_ops->readlock();  	completed = cur_ops->completed(); +	ts = rcu_trace_clock_local();  	p = rcu_dereference_check(rcu_torture_current,  				  rcu_read_lock_bh_held() ||  				  rcu_read_lock_sched_held() || @@ -1058,7 +1075,6 @@ static void rcu_torture_timer(unsigned long unused)  		cur_ops->readunlock(idx);  		return;  	} -	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);  	if (p->rtort_mbtest == 0)  		atomic_inc(&n_rcu_torture_mberror);  	spin_lock(&rand_lock); @@ -1071,10 +1087,14 @@ static void rcu_torture_timer(unsigned long unused)  		/* Should not happen, but... */  		pipe_count = RCU_TORTURE_PIPE_LEN;  	} -	if (pipe_count > 1) +	completed_end = cur_ops->completed(); +	if (pipe_count > 1) { +		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, +					  completed, completed_end);  		rcutorture_trace_dump(); +	}  	__this_cpu_inc(rcu_torture_count[pipe_count]); -	completed = cur_ops->completed() - completed; +	completed = completed_end - completed;  	if (completed > RCU_TORTURE_PIPE_LEN) {  		/* Should not happen, but... */  		completed = RCU_TORTURE_PIPE_LEN; @@ -1094,11 +1114,13 @@ static int  rcu_torture_reader(void *arg)  {  	int completed; +	int completed_end;  	int idx;  	DEFINE_RCU_RANDOM(rand);  	struct rcu_torture *p;  	int pipe_count;  	struct timer_list t; +	unsigned long long ts;  	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");  	set_user_nice(current, 19); @@ -1112,6 +1134,7 @@ rcu_torture_reader(void *arg)  		}  		idx = cur_ops->readlock();  		completed = cur_ops->completed(); +		ts = rcu_trace_clock_local();  		p = rcu_dereference_check(rcu_torture_current,  					  rcu_read_lock_bh_held() ||  					  rcu_read_lock_sched_held() || @@ -1122,7 +1145,6 @@ rcu_torture_reader(void *arg)  			schedule_timeout_interruptible(HZ);  			continue;  		} -		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);  		if (p->rtort_mbtest == 0)  			atomic_inc(&n_rcu_torture_mberror);  		cur_ops->read_delay(&rand); @@ -1132,10 +1154,14 @@ rcu_torture_reader(void *arg)  			/* Should not happen, but... */  			pipe_count = RCU_TORTURE_PIPE_LEN;  		} -		if (pipe_count > 1) +		completed_end = cur_ops->completed(); +		if (pipe_count > 1) { +			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, +						  ts, completed, completed_end);  			rcutorture_trace_dump(); +		}  		__this_cpu_inc(rcu_torture_count[pipe_count]); -		completed = cur_ops->completed() - completed; +		completed = completed_end - completed;  		if (completed > RCU_TORTURE_PIPE_LEN) {  			/* Should not happen, but... 
*/  			completed = RCU_TORTURE_PIPE_LEN; @@ -1301,19 +1327,35 @@ static void rcu_torture_shuffle_tasks(void)  				set_cpus_allowed_ptr(reader_tasks[i],  						     shuffle_tmp_mask);  	} -  	if (fakewriter_tasks) {  		for (i = 0; i < nfakewriters; i++)  			if (fakewriter_tasks[i])  				set_cpus_allowed_ptr(fakewriter_tasks[i],  						     shuffle_tmp_mask);  	} -  	if (writer_task)  		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); -  	if (stats_task)  		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); +	if (stutter_task) +		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask); +	if (fqs_task) +		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask); +	if (shutdown_task) +		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask); +#ifdef CONFIG_HOTPLUG_CPU +	if (onoff_task) +		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask); +#endif /* #ifdef CONFIG_HOTPLUG_CPU */ +	if (stall_task) +		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask); +	if (barrier_cbs_tasks) +		for (i = 0; i < n_barrier_cbs; i++) +			if (barrier_cbs_tasks[i]) +				set_cpus_allowed_ptr(barrier_cbs_tasks[i], +						     shuffle_tmp_mask); +	if (barrier_task) +		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);  	if (rcu_idle_cpu == -1)  		rcu_idle_cpu = num_online_cpus() - 1; @@ -1749,7 +1791,7 @@ static int rcu_torture_barrier_init(void)  	barrier_cbs_wq =  		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),  			GFP_KERNEL); -	if (barrier_cbs_tasks == NULL || barrier_cbs_wq == 0) +	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)  		return -ENOMEM;  	for (i = 0; i < n_barrier_cbs; i++) {  		init_waitqueue_head(&barrier_cbs_wq[i]); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index e441b77b614..5b8ad827fd8 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -105,7 +105,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */   * The rcu_scheduler_active variable transitions from zero to one just   * before the first task is spawned.  So when this variable is zero, RCU   * can assume that there is but one task, allowing RCU to (for example) - * optimized synchronize_sched() to a simple barrier().  When this variable + * optimize synchronize_sched() to a simple barrier().  When this variable   * is one, RCU must actually do all the hard work required to detect real   * grace periods.  This variable is also used to suppress boot-time false   * positives from lockdep-RCU error checking. @@ -217,12 +217,6 @@ module_param(blimit, long, 0444);  module_param(qhimark, long, 0444);  module_param(qlowmark, long, 0444); -int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ -int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; - -module_param(rcu_cpu_stall_suppress, int, 0644); -module_param(rcu_cpu_stall_timeout, int, 0644); -  static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;  static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; @@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)  }  /* - * Does the current CPU require a yet-as-unscheduled grace period? + * Does the current CPU require a not-yet-started grace period? + * The caller must have disabled interrupts to prevent races with + * normal callback registry.   
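+ *
+ * For example, with no grace period in progress, a CPU that has newly
+ * registered callbacks makes this function return 1, as does a CPU
+ * holding callbacks tagged with a ->completed number that the
+ * rcu_state structure has not yet reached.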
*/  static int  cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)  { -	struct rcu_head **ntp; +	int i; -	ntp = rdp->nxttail[RCU_DONE_TAIL + -			   (ACCESS_ONCE(rsp->completed) != rdp->completed)]; -	return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp && -	       !rcu_gp_in_progress(rsp); +	if (rcu_gp_in_progress(rsp)) +		return 0;  /* No, a grace period is already in progress. */ +	if (!rdp->nxttail[RCU_NEXT_TAIL]) +		return 0;  /* No, this is a no-CBs (or offline) CPU. */ +	if (*rdp->nxttail[RCU_NEXT_READY_TAIL]) +		return 1;  /* Yes, this CPU has newly registered callbacks. */ +	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) +		if (rdp->nxttail[i - 1] != rdp->nxttail[i] && +		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed), +				 rdp->nxtcompleted[i])) +			return 1;  /* Yes, CBs for future grace period. */ +	return 0; /* No grace period needed. */  }  /* @@ -336,7 +340,7 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)  static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,  				bool user)  { -	trace_rcu_dyntick("Start", oldval, 0); +	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);  	if (!user && !is_idle_task(current)) {  		struct task_struct *idle = idle_task(smp_processor_id()); @@ -727,7 +731,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);   * interrupt from idle, return true.  The caller must have at least   * disabled preemption.   */ -int rcu_is_cpu_rrupt_from_idle(void) +static int rcu_is_cpu_rrupt_from_idle(void)  {  	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;  } @@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)  	return 0;  } -static int jiffies_till_stall_check(void) -{ -	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout); - -	/* -	 * Limit check must be consistent with the Kconfig limits -	 * for CONFIG_RCU_CPU_STALL_TIMEOUT. -	 */ -	if (till_stall_check < 3) { -		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; -		till_stall_check = 3; -	} else if (till_stall_check > 300) { -		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; -		till_stall_check = 300; -	} -	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; -} -  static void record_gp_stall_check_time(struct rcu_state *rsp)  {  	rsp->gp_start = jiffies; -	rsp->jiffies_stall = jiffies + jiffies_till_stall_check(); +	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();  }  /* @@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)  		raw_spin_unlock_irqrestore(&rnp->lock, flags);  		return;  	} -	rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3; +	rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;  	raw_spin_unlock_irqrestore(&rnp->lock, flags);  	/* @@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp)  	raw_spin_lock_irqsave(&rnp->lock, flags);  	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))  		rsp->jiffies_stall = jiffies + -				     3 * jiffies_till_stall_check() + 3; +				     3 * rcu_jiffies_till_stall_check() + 3;  	raw_spin_unlock_irqrestore(&rnp->lock, flags);  	set_need_resched();  /* kick ourselves to get things going. 
*/ @@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)  	}  } -static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) -{ -	rcu_cpu_stall_suppress = 1; -	return NOTIFY_DONE; -} -  /**   * rcu_cpu_stall_reset - prevent further stall warnings in current grace period   * @@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void)  		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;  } -static struct notifier_block rcu_panic_block = { -	.notifier_call = rcu_panic, -}; - -static void __init check_cpu_stall_init(void) -{ -	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); -} -  /*   * Update CPU-local rcu_data state to record the newly noticed grace period.   * This is used both when we started the grace period and when we notice @@ -1071,6 +1042,145 @@ static void init_callback_list(struct rcu_data *rdp)  }  /* + * Determine the value that ->completed will have at the end of the + * next subsequent grace period.  This is used to tag callbacks so that + * a CPU can invoke callbacks in a timely fashion even if that CPU has + * been dyntick-idle for an extended period with callbacks under the + * influence of RCU_FAST_NO_HZ. + * + * The caller must hold rnp->lock with interrupts disabled. + */ +static unsigned long rcu_cbs_completed(struct rcu_state *rsp, +				       struct rcu_node *rnp) +{ +	/* +	 * If RCU is idle, we just wait for the next grace period. +	 * But we can only be sure that RCU is idle if we are looking +	 * at the root rcu_node structure -- otherwise, a new grace +	 * period might have started, but just not yet gotten around +	 * to initializing the current non-root rcu_node structure. +	 */ +	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) +		return rnp->completed + 1; + +	/* +	 * Otherwise, wait for a possible partial grace period and +	 * then the subsequent full grace period. +	 */ +	return rnp->completed + 2; +} + +/* + * If there is room, assign a ->completed number to any callbacks on + * this CPU that have not already been assigned.  Also accelerate any + * callbacks that were previously assigned a ->completed number that has + * since proven to be too conservative, which can happen if callbacks get + * assigned a ->completed number while RCU is idle, but with reference to + * a non-root rcu_node structure.  This function is idempotent, so it does + * not hurt to call it repeatedly. + * + * The caller must hold rnp->lock with interrupts disabled. + */ +static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, +			       struct rcu_data *rdp) +{ +	unsigned long c; +	int i; + +	/* If the CPU has no callbacks, nothing to do. */ +	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) +		return; + +	/* +	 * Starting from the sublist containing the callbacks most +	 * recently assigned a ->completed number and working down, find the +	 * first sublist that is not assignable to an upcoming grace period. +	 * Such a sublist has something in it (first two tests) and has +	 * a ->completed number assigned that will complete sooner than +	 * the ->completed number for newly arrived callbacks (last test). +	 * +	 * The key point is that any later sublist can be assigned the +	 * same ->completed number as the newly arrived callbacks, which +	 * means that the callbacks in any of these later sublist can be +	 * grouped into a single sublist, whether or not they have already +	 * been assigned a ->completed number. 
+	 */ +	c = rcu_cbs_completed(rsp, rnp); +	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--) +		if (rdp->nxttail[i] != rdp->nxttail[i - 1] && +		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c)) +			break; + +	/* +	 * If there are no sublist for unassigned callbacks, leave. +	 * At the same time, advance "i" one sublist, so that "i" will +	 * index into the sublist where all the remaining callbacks should +	 * be grouped into. +	 */ +	if (++i >= RCU_NEXT_TAIL) +		return; + +	/* +	 * Assign all subsequent callbacks' ->completed number to the next +	 * full grace period and group them all in the sublist initially +	 * indexed by "i". +	 */ +	for (; i <= RCU_NEXT_TAIL; i++) { +		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL]; +		rdp->nxtcompleted[i] = c; +	} + +	/* Trace depending on how much we were able to accelerate. */ +	if (!*rdp->nxttail[RCU_WAIT_TAIL]) +		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB"); +	else +		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB"); +} + +/* + * Move any callbacks whose grace period has completed to the + * RCU_DONE_TAIL sublist, then compact the remaining sublists and + * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL + * sublist.  This function is idempotent, so it does not hurt to + * invoke it repeatedly.  As long as it is not invoked -too- often... + * + * The caller must hold rnp->lock with interrupts disabled. + */ +static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, +			    struct rcu_data *rdp) +{ +	int i, j; + +	/* If the CPU has no callbacks, nothing to do. */ +	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) +		return; + +	/* +	 * Find all callbacks whose ->completed numbers indicate that they +	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. +	 */ +	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { +		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) +			break; +		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i]; +	} +	/* Clean up any sublist tail pointers that were misordered above. */ +	for (j = RCU_WAIT_TAIL; j < i; j++) +		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL]; + +	/* Copy down callbacks to fill in empty sublists. */ +	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { +		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL]) +			break; +		rdp->nxttail[j] = rdp->nxttail[i]; +		rdp->nxtcompleted[j] = rdp->nxtcompleted[i]; +	} + +	/* Classify any remaining callbacks. */ +	rcu_accelerate_cbs(rsp, rnp, rdp); +} + +/*   * Advance this CPU's callbacks, but only if the current grace period   * has ended.  This may be called only from the CPU to whom the rdp   * belongs.  In addition, the corresponding leaf rcu_node structure's @@ -1080,12 +1190,15 @@ static void  __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)  {  	/* Did another grace period end? */ -	if (rdp->completed != rnp->completed) { +	if (rdp->completed == rnp->completed) { -		/* Advance callbacks.  No harm if list empty. */ -		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; -		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; -		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; +		/* No, so just accelerate recent callbacks. */ +		rcu_accelerate_cbs(rsp, rnp, rdp); + +	} else { + +		/* Advance callbacks. */ +		rcu_advance_cbs(rsp, rnp, rdp);  		/* Remember that we saw this grace-period completion. 
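rcu_accelerate_cbs() and rcu_advance_cbs() above manage whole sublists of callbacks, each tagged with the grace-period number that must complete before its callbacks may run. A toy user-space model of the advance step; the toy_* names are hypothetical and a plain comparison stands in for the wrap-safe macros:

        #include <stdio.h>

        enum { DONE, WAIT, NEXT_READY, NEXT, NSEG };  /* loosely modelled on RCU_*_TAIL */

        struct toy_cpu {
                int           count[NSEG];  /* callbacks queued in each sublist     */
                unsigned long tag[NSEG];    /* nxtcompleted[] analogue (GP numbers) */
        };

        /* Fold every sublist whose tagged grace period has ended into DONE. */
        static void toy_advance(struct toy_cpu *cpu, unsigned long completed)
        {
                for (int i = WAIT; i < NEXT; i++) {
                        if (cpu->tag[i] > completed)
                                break;
                        cpu->count[DONE] += cpu->count[i];  /* now ready to invoke */
                        cpu->count[i] = 0;
                }
        }

        int main(void)
        {
                struct toy_cpu cpu = {
                        .count = { 0, 2, 3, 1 },   /* DONE, WAIT, NEXT_READY, NEXT */
                        .tag   = { 0, 10, 11, 12 },
                };

                toy_advance(&cpu, 11);             /* grace periods 10 and 11 ended */
                printf("done=%d wait=%d ready=%d next=%d\n",
                       cpu.count[DONE], cpu.count[WAIT],
                       cpu.count[NEXT_READY], cpu.count[NEXT]);
                /* prints: done=5 wait=0 ready=0 next=1 */
                return 0;
        }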
*/  		rdp->completed = rnp->completed; @@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)  	/*  	 * Because there is no grace period in progress right now,  	 * any callbacks we have up to this point will be satisfied -	 * by the next grace period.  So promote all callbacks to be -	 * handled after the end of the next grace period.  If the -	 * CPU is not yet aware of the end of the previous grace period, -	 * we need to allow for the callback advancement that will -	 * occur when it does become aware.  Deadlock prevents us from -	 * making it aware at this point: We cannot acquire a leaf -	 * rcu_node ->lock while holding the root rcu_node ->lock. +	 * by the next grace period.  So this is a good place to +	 * assign a grace period number to recently posted callbacks.  	 */ -	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; -	if (rdp->completed == rsp->completed) -		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; +	rcu_accelerate_cbs(rsp, rnp, rdp);  	rsp->gp_flags = RCU_GP_FLAG_INIT;  	raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ @@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)  		 * This GP can't end until cpu checks in, so all of our  		 * callbacks can be processed during the next GP.  		 */ -		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; +		rcu_accelerate_cbs(rsp, rnp, rdp);  		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */  	} @@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)  	long bl, count, count_lazy;  	int i; -	/* If no callbacks are ready, just return.*/ +	/* If no callbacks are ready, just return. */  	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {  		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);  		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), @@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp)  	WARN_ON_ONCE(rdp->beenonline == 0); -	/* -	 * Advance callbacks in response to end of earlier grace -	 * period that some other CPU ended. -	 */ +	/* Handle the end of a grace period that some other CPU ended.  */  	rcu_process_gp_end(rsp, rdp);  	/* Update RCU state based on any recent quiescent states. */  	rcu_check_quiescent_state(rsp, rdp);  	/* Does this CPU require a not-yet-started grace period? */ +	local_irq_save(flags);  	if (cpu_needs_another_gp(rsp, rdp)) { -		raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); +		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */  		rcu_start_gp(rsp, flags);  /* releases above lock */ +	} else { +		local_irq_restore(flags);  	}  	/* If there are callbacks ready, invoke them. */ @@ -2719,9 +2825,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)  	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);  	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);  	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); -#ifdef CONFIG_RCU_USER_QS -	WARN_ON_ONCE(rdp->dynticks->in_user); -#endif  	rdp->cpu = cpu;  	rdp->rsp = rsp;  	rcu_boot_init_nocb_percpu_data(rdp); @@ -2938,6 +3041,10 @@ static void __init rcu_init_one(struct rcu_state *rsp,  	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */ +	/* Silence gcc 4.8 warning about array index out of range. */ +	if (rcu_num_lvls > RCU_NUM_LVLS) +		panic("rcu_init_one: rcu_num_lvls overflow"); +  	/* Initialize the level-tracking arrays. 
*/  	for (i = 0; i < rcu_num_lvls; i++) @@ -3074,7 +3181,6 @@ void __init rcu_init(void)  	cpu_notifier(rcu_cpu_notify, 0);  	for_each_online_cpu(cpu)  		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); -	check_cpu_stall_init();  }  #include "rcutree_plugin.h" diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 4b69291b093..c896b5045d9 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -102,10 +102,6 @@ struct rcu_dynticks {  				    /* idle-period nonlazy_posted snapshot. */  	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ -#ifdef CONFIG_RCU_USER_QS -	bool ignore_user_qs;	    /* Treat userspace as extended QS or not */ -	bool in_user;		    /* Is the CPU in userland from RCU POV? */ -#endif  };  /* RCU's kthread states for tracing. */ @@ -282,6 +278,8 @@ struct rcu_data {  	 */  	struct rcu_head *nxtlist;  	struct rcu_head **nxttail[RCU_NEXT_SIZE]; +	unsigned long	nxtcompleted[RCU_NEXT_SIZE]; +					/* grace periods for sublists. */  	long		qlen_lazy;	/* # of lazy queued callbacks */  	long		qlen;		/* # of queued callbacks, incl lazy */  	long		qlen_last_fqs_check; @@ -343,11 +341,6 @@ struct rcu_data {  #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */ -#ifdef CONFIG_PROVE_RCU -#define RCU_STALL_DELAY_DELTA	       (5 * HZ) -#else -#define RCU_STALL_DELAY_DELTA	       0 -#endif  #define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */  						/*  to take at least one */  						/*  scheduling clock irq */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index f6e5ec2932b..c1cc7e17ff9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -40,8 +40,7 @@  #ifdef CONFIG_RCU_NOCB_CPU  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */  static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */ -static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */ -module_param(rcu_nocb_poll, bool, 0444); +static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */  static char __initdata nocb_buf[NR_CPUS * 5];  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ @@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)  }  __setup("rcu_nocbs=", rcu_nocb_setup); +static int __init parse_rcu_nocb_poll(char *arg) +{ +	rcu_nocb_poll = 1; +	return 0; +} +early_param("rcu_nocb_poll", parse_rcu_nocb_poll); +  /* Is the specified CPU a no-CPUs CPU? */  static bool is_nocb_cpu(int cpu)  { @@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)  	for (;;) {  		/* If not polling, wait for next batch of callbacks. 
*/  		if (!rcu_nocb_poll) -			wait_event(rdp->nocb_wq, rdp->nocb_head); +			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);  		list = ACCESS_ONCE(rdp->nocb_head);  		if (!list) {  			schedule_timeout_interruptible(1); +			flush_signals(current);  			continue;  		} diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 257002c13bb..26058d0bebb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1523,7 +1523,8 @@ out:   */  int wake_up_process(struct task_struct *p)  { -	return try_to_wake_up(p, TASK_ALL, 0); +	WARN_ON(task_is_stopped_or_traced(p)); +	return try_to_wake_up(p, TASK_NORMAL, 0);  }  EXPORT_SYMBOL(wake_up_process); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2cd3c1b4e58..7ae4c4c5420 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)  			cfs_rq->runnable_load_avg);  	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",  			cfs_rq->blocked_load_avg); -	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg", -			atomic64_read(&cfs_rq->tg->load_avg)); +	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg", +			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));  	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",  			cfs_rq->tg_load_contrib);  	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib", diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5eea8707234..81fa5364340 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)  	hrtimer_cancel(&cfs_b->slack_timer);  } -static void unthrottle_offline_cfs_rqs(struct rq *rq) +static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)  {  	struct cfs_rq *cfs_rq; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 418feb01344..4f02b284735 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)  static int do_balance_runtime(struct rt_rq *rt_rq)  {  	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); -	struct root_domain *rd = cpu_rq(smp_processor_id())->rd; +	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;  	int i, weight, more = 0;  	u64 rt_period; diff --git a/kernel/signal.c b/kernel/signal.c index 372771e948c..3d09cf6cde7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)   * No need to set need_resched since signal event passing   * goes through ->blocked   */ -void signal_wake_up(struct task_struct *t, int resume) +void signal_wake_up_state(struct task_struct *t, unsigned int state)  { -	unsigned int mask; -  	set_tsk_thread_flag(t, TIF_SIGPENDING); -  	/* -	 * For SIGKILL, we want to wake it up in the stopped/traced/killable +	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable  	 * case. We don't check t->state here because there is a race with it  	 * executing another processor and just now entering stopped state.  	 * By using wake_up_state, we ensure the process will wake up and  	 * handle its death signal.  	 
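The print_cfs_rq() hunk above adds a cast before printing because the integer type behind an atomic64_t accessor can differ between architectures; casting to a fixed-width type keeps the format string portable. A trivial illustration with a made-up value:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                int64_t tg_load_avg = 123456789012345LL;  /* stand-in for atomic64_read() */

                /* Cast once, print with %lld, independent of how the value is typedef'd. */
                printf("  .%-30s: %lld\n", "tg_load_avg", (long long)tg_load_avg);
                return 0;
        }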
*/ -	mask = TASK_INTERRUPTIBLE; -	if (resume) -		mask |= TASK_WAKEKILL; -	if (!wake_up_state(t, mask)) +	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))  		kick_process(t);  } @@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)  	assert_spin_locked(&t->sighand->siglock);  	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); -	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); +	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);  }  /* @@ -1800,6 +1794,10 @@ static inline int may_ptrace_stop(void)  	 * If SIGKILL was already sent before the caller unlocked  	 * ->siglock we must see ->core_state != NULL. Otherwise it  	 * is safe to enter schedule(). +	 * +	 * This is almost outdated, a task with the pending SIGKILL can't +	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported +	 * after SIGKILL was already dequeued.  	 */  	if (unlikely(current->mm->core_state) &&  	    unlikely(current->mm == current->parent->mm)) @@ -1925,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)  		if (gstop_done)  			do_notify_parent_cldstop(current, false, why); +		/* tasklist protects us from ptrace_freeze_traced() */  		__set_current_state(TASK_RUNNING);  		if (clear_code)  			current->exit_code = 0; @@ -3116,8 +3115,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)  #ifdef CONFIG_COMPAT  #ifdef CONFIG_GENERIC_SIGALTSTACK -asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, -				       compat_stack_t __user *uoss_ptr) +COMPAT_SYSCALL_DEFINE2(sigaltstack, +			const compat_stack_t __user *, uss_ptr, +			compat_stack_t __user *, uoss_ptr)  {  	stack_t uss, uoss;  	int ret; diff --git a/kernel/smp.c b/kernel/smp.c index 29dd40a9f2f..69f38bd98b4 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -33,6 +33,7 @@ struct call_function_data {  	struct call_single_data	csd;  	atomic_t		refs;  	cpumask_var_t		cpumask; +	cpumask_var_t		cpumask_ipi;  };  static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); @@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)  		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,  				cpu_to_node(cpu)))  			return notifier_from_errno(-ENOMEM); +		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, +				cpu_to_node(cpu))) +			return notifier_from_errno(-ENOMEM);  		break;  #ifdef CONFIG_HOTPLUG_CPU @@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)  	case CPU_DEAD:  	case CPU_DEAD_FROZEN:  		free_cpumask_var(cfd->cpumask); +		free_cpumask_var(cfd->cpumask_ipi);  		break;  #endif  	}; @@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,  		return;  	} +	/* +	 * After we put an entry into the list, data->cpumask +	 * may be cleared again when another CPU sends another IPI for +	 * a SMP function call, so data->cpumask will be zero. 
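The smp_call_function_many() comment above explains why the target mask is copied before the request is published: once other CPUs can see the entry, the shared mask may already have been cleared by the time the IPIs go out. A stand-alone sketch of that snapshot-then-publish pattern (no real IPIs, toy_* names are hypothetical), matching the cpumask_copy() that follows:

        #include <stdio.h>

        struct toy_req {
                unsigned long cpumask;      /* may be cleared by target CPUs      */
                unsigned long cpumask_ipi;  /* private snapshot used for the IPIs */
        };

        static void toy_send(struct toy_req *req)
        {
                req->cpumask_ipi = req->cpumask;  /* snapshot before publishing */
                /* ...publish req on the shared list here... */
                req->cpumask = 0;                 /* simulate targets clearing it */
                printf("send IPIs using %#lx (shared mask is now %#lx)\n",
                       req->cpumask_ipi, req->cpumask);
        }

        int main(void)
        {
                struct toy_req req = { .cpumask = 0x6 };  /* CPUs 1 and 2 */

                toy_send(&req);
                return 0;
        }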
+	 */ +	cpumask_copy(data->cpumask_ipi, data->cpumask);  	raw_spin_lock_irqsave(&call_function.lock, flags);  	/*  	 * Place entry at the _HEAD_ of the list, so that any cpu still @@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,  	smp_mb();  	/* Send a message to all CPUs in the map */ -	arch_send_call_function_ipi_mask(data->cpumask); +	arch_send_call_function_ipi_mask(data->cpumask_ipi);  	/* Optionally wait for the CPUs to complete */  	if (wait) diff --git a/kernel/srcu.c b/kernel/srcu.c index 2b859828cdc..01d5ccb8bfe 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c @@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)   */  void cleanup_srcu_struct(struct srcu_struct *sp)  { -	int sum; - -	sum = srcu_readers_active(sp); -	WARN_ON(sum);  /* Leakage unless caller handles error. */ -	if (sum != 0) -		return; +	if (WARN_ON(srcu_readers_active(sp))) +		return; /* Leakage unless caller handles error. */  	free_percpu(sp->per_cpu_ref);  	sp->per_cpu_ref = NULL;  } @@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)  {  	int idx; +	idx = ACCESS_ONCE(sp->completed) & 0x1;  	preempt_disable(); -	idx = rcu_dereference_index_check(sp->completed, -					  rcu_read_lock_sched_held()) & 0x1;  	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;  	smp_mb(); /* B */  /* Avoid leaking the critical section. */  	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; @@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);   */  void __srcu_read_unlock(struct srcu_struct *sp, int idx)  { -	preempt_disable();  	smp_mb(); /* C */  /* Avoid leaking the critical section. */ -	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1; -	preempt_enable(); +	this_cpu_dec(sp->per_cpu_ref->c[idx]);  }  EXPORT_SYMBOL_GPL(__srcu_read_unlock); @@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)  			   !lock_is_held(&rcu_sched_lock_map),  			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); +	might_sleep();  	init_completion(&rcu.completion);  	head->next = NULL; @@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)   * synchronize_srcu - wait for prior SRCU read-side critical-section completion   * @sp: srcu_struct with which to synchronize.   * - * Flip the completed counter, and wait for the old count to drain to zero. - * As with classic RCU, the updater must use some separate means of - * synchronizing concurrent updates.  Can block; must be called from - * process context. + * Wait for the count to drain to zero of both indexes. To avoid the + * possible starvation of synchronize_srcu(), it waits for the count of + * the index=((->completed & 1) ^ 1) to drain to zero at first, + * and then flip the completed and wait for the count of the other index. + * + * Can block; must be called from process context.   *   * Note that it is illegal to call synchronize_srcu() from the corresponding   * SRCU read-side critical section; doing so will result in deadlock. @@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);   * Wait for an SRCU grace period to elapse, but be more aggressive about   * spinning rather than blocking when waiting.   * - * Note that it is illegal to call this function while holding any lock - * that is acquired by a CPU-hotplug notifier.  It is also illegal to call - * synchronize_srcu_expedited() from the corresponding SRCU read-side - * critical section; doing so will result in deadlock.  
However, it is - * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct - * from some other srcu_struct's read-side critical section, as long as + * Note that it is also illegal to call synchronize_srcu_expedited() + * from the corresponding SRCU read-side critical section; + * doing so will result in deadlock.  However, it is perfectly legal + * to call synchronize_srcu_expedited() on one srcu_struct from some + * other srcu_struct's read-side critical section, as long as   * the resulting graph of srcu_structs is acyclic.   */  void synchronize_srcu_expedited(struct srcu_struct *sp) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3ffe4c5ad3f..41473b4ad7a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3998,7 +3998,7 @@ static int ftrace_module_notify(struct notifier_block *self,  struct notifier_block ftrace_module_nb = {  	.notifier_call = ftrace_module_notify, -	.priority = 0, +	.priority = INT_MAX,	/* Run before anything that can use kprobes */  };  extern unsigned long __start_mcount_loc[]; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 394783531cb..1bbb1b200ce 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -44,6 +44,7 @@ u64 notrace trace_clock_local(void)  	return clock;  } +EXPORT_SYMBOL_GPL(trace_clock_local);  /*   * trace_clock(): 'between' trace clock. Not completely serialized, diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 67604e59938..a1714c897e3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -605,61 +605,6 @@ config PROVE_LOCKING  	 For more details, see Documentation/lockdep-design.txt. -config PROVE_RCU -	bool "RCU debugging: prove RCU correctness" -	depends on PROVE_LOCKING -	default n -	help -	 This feature enables lockdep extensions that check for correct -	 use of RCU APIs.  This is currently under development.  Say Y -	 if you want to debug RCU usage or help work on the PROVE_RCU -	 feature. - -	 Say N if you are unsure. - -config PROVE_RCU_REPEATEDLY -	bool "RCU debugging: don't disable PROVE_RCU on first splat" -	depends on PROVE_RCU -	default n -	help -	 By itself, PROVE_RCU will disable checking upon issuing the -	 first warning (or "splat").  This feature prevents such -	 disabling, allowing multiple RCU-lockdep warnings to be printed -	 on a single reboot. - -	 Say Y to allow multiple RCU-lockdep warnings per boot. - -	 Say N if you are unsure. - -config PROVE_RCU_DELAY -	bool "RCU debugging: preemptible RCU race provocation" -	depends on DEBUG_KERNEL && PREEMPT_RCU -	default n -	help -	 There is a class of races that involve an unlikely preemption -	 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has -	 been set to INT_MIN.  This feature inserts a delay at that -	 point to increase the probability of these races. - -	 Say Y to increase probability of preemption of __rcu_read_unlock(). - -	 Say N if you are unsure. - -config SPARSE_RCU_POINTER -	bool "RCU debugging: sparse-based checks for pointer usage" -	default n -	help -	 This feature enables the __rcu sparse annotation for -	 RCU-protected pointers.  This annotation will cause sparse -	 to flag any non-RCU used of annotated pointers.  This can be -	 helpful when debugging RCU usage.  Please note that this feature -	 is not intended to enforce code cleanliness; it is instead merely -	 a debugging aid. - -	 Say Y to make sparse flag questionable use of RCU-protected pointers - -	 Say N if you are unsure. 
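The updated synchronize_srcu() comment in the srcu.c hunks above describes a two-phase scheme: drain the currently inactive index, flip ->completed, then drain the other index. A sequential toy model of that counting scheme; there is no real concurrency here and the toy_* names are hypothetical:

        #include <stdio.h>

        static unsigned long completed;
        static long nreaders[2];       /* stand-in for the summed per-CPU c[] counters */

        static int toy_read_lock(void)
        {
                int idx = completed & 0x1;

                nreaders[idx]++;
                return idx;
        }

        static void toy_read_unlock(int idx)
        {
                nreaders[idx]--;
        }

        static void toy_synchronize(void)
        {
                /* Phase 1: wait for the inactive index to drain. */
                while (nreaders[(completed & 0x1) ^ 1])
                        ;       /* a real updater blocks or retries here */

                /* Phase 2: flip, so new readers use the other index... */
                completed++;

                /* ...and wait for the now-inactive index to drain. */
                while (nreaders[(completed & 0x1) ^ 1])
                        ;
        }

        int main(void)
        {
                int idx = toy_read_lock();

                toy_read_unlock(idx);   /* no readers left...              */
                toy_synchronize();      /* ...so this returns immediately  */
                printf("completed=%lu readers={%ld,%ld}\n",
                       completed, nreaders[0], nreaders[1]);
                return 0;
        }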
-  config LOCKDEP  	bool  	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT @@ -937,6 +882,63 @@ config BOOT_PRINTK_DELAY  	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect  	  what it believes to be lockup conditions. +menu "RCU Debugging" + +config PROVE_RCU +	bool "RCU debugging: prove RCU correctness" +	depends on PROVE_LOCKING +	default n +	help +	 This feature enables lockdep extensions that check for correct +	 use of RCU APIs.  This is currently under development.  Say Y +	 if you want to debug RCU usage or help work on the PROVE_RCU +	 feature. + +	 Say N if you are unsure. + +config PROVE_RCU_REPEATEDLY +	bool "RCU debugging: don't disable PROVE_RCU on first splat" +	depends on PROVE_RCU +	default n +	help +	 By itself, PROVE_RCU will disable checking upon issuing the +	 first warning (or "splat").  This feature prevents such +	 disabling, allowing multiple RCU-lockdep warnings to be printed +	 on a single reboot. + +	 Say Y to allow multiple RCU-lockdep warnings per boot. + +	 Say N if you are unsure. + +config PROVE_RCU_DELAY +	bool "RCU debugging: preemptible RCU race provocation" +	depends on DEBUG_KERNEL && PREEMPT_RCU +	default n +	help +	 There is a class of races that involve an unlikely preemption +	 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has +	 been set to INT_MIN.  This feature inserts a delay at that +	 point to increase the probability of these races. + +	 Say Y to increase probability of preemption of __rcu_read_unlock(). + +	 Say N if you are unsure. + +config SPARSE_RCU_POINTER +	bool "RCU debugging: sparse-based checks for pointer usage" +	default n +	help +	 This feature enables the __rcu sparse annotation for +	 RCU-protected pointers.  This annotation will cause sparse +	 to flag any non-RCU used of annotated pointers.  This can be +	 helpful when debugging RCU usage.  Please note that this feature +	 is not intended to enforce code cleanliness; it is instead merely +	 a debugging aid. + +	 Say Y to make sparse flag questionable use of RCU-protected pointers + +	 Say N if you are unsure. +  config RCU_TORTURE_TEST  	tristate "torture tests for RCU"  	depends on DEBUG_KERNEL @@ -970,7 +972,7 @@ config RCU_TORTURE_TEST_RUNNABLE  config RCU_CPU_STALL_TIMEOUT  	int "RCU CPU stall timeout in seconds" -	depends on TREE_RCU || TREE_PREEMPT_RCU +	depends on RCU_STALL_COMMON  	range 3 300  	default 21  	help @@ -1008,6 +1010,7 @@ config RCU_CPU_STALL_INFO  config RCU_TRACE  	bool "Enable tracing for RCU"  	depends on DEBUG_KERNEL +	select TRACE_CLOCK  	help  	  This option provides tracing in RCU which presents stats  	  in debugfs for debugging RCU implementation. @@ -1015,6 +1018,8 @@ config RCU_TRACE  	  Say Y here if you want to enable RCU tracing  	  Say N if you are unsure. 
+endmenu # "RCU Debugging" +  config KPROBES_SANITY_TEST  	bool "Kprobes sanity tests"  	depends on DEBUG_KERNEL diff --git a/lib/bug.c b/lib/bug.c index a28c1415357..d0cdf14c651 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)  }  #ifdef CONFIG_MODULES +/* Updates are protected by module mutex */  static LIST_HEAD(module_bug_list);  static const struct bug_entry *module_find_bug(unsigned long bugaddr) diff --git a/lib/digsig.c b/lib/digsig.c index 8c0e62975c8..dc2be7ed176 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,  	memset(out1, 0, head);  	memcpy(out1 + head, p, l); +	kfree(p); +  	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);  	if (err)  		goto err; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6001ee6347a..b5783d81eda 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,  	if (flags & FOLL_WRITE && !pmd_write(*pmd))  		goto out; +	/* Avoid dumping huge zero page */ +	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) +		return ERR_PTR(-EFAULT); +  	page = pmd_page(*pmd);  	VM_BUG_ON(!PageHead(page));  	if (flags & FOLL_TOUCH) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4f3ea0b1e57..546db81820e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,  		if (!huge_pte_none(huge_ptep_get(ptep))) {  			pte = huge_ptep_get_and_clear(mm, address, ptep);  			pte = pte_mkhuge(pte_modify(pte, newprot)); +			pte = arch_make_huge_pte(pte, vma, NULL, 0);  			set_huge_pte_at(mm, address, ptep, pte);  			pages++;  		} diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09255ec8159..fbb60b103e6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,  	if (memcg) {  		s->memcg_params->memcg = memcg;  		s->memcg_params->root_cache = root_cache; -	} +	} else +		s->memcg_params->is_root_cache = true; +  	return 0;  } diff --git a/mm/migrate.c b/mm/migrate.c index c38778610aa..2fd8b4af474 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,  	if (is_write_migration_entry(entry))  		pte = pte_mkwrite(pte);  #ifdef CONFIG_HUGETLB_PAGE -	if (PageHuge(new)) +	if (PageHuge(new)) {  		pte = pte_mkhuge(pte); +		pte = arch_make_huge_pte(pte, vma, new, 0); +	}  #endif  	flush_cache_page(vma, addr, pte_pfn(pte));  	set_pte_at(mm, addr, ptep, pte); diff --git a/mm/mlock.c b/mm/mlock.c index f0b9ce572fc..c9bd528b01d 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)  static int do_mlockall(int flags)  {  	struct vm_area_struct * vma, * prev = NULL; -	unsigned int def_flags = 0;  	if (flags & MCL_FUTURE) -		def_flags = VM_LOCKED; -	current->mm->def_flags = def_flags; +		current->mm->def_flags |= VM_LOCKED; +	else +		current->mm->def_flags &= ~VM_LOCKED;  	if (flags == MCL_FUTURE)  		goto out; diff --git a/mm/mmap.c b/mm/mmap.c index 35730ee9d51..d1e4124f3d0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2943,7 +2943,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)   * vma in this mm is backed by the same anon_vma or address_space.   
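The do_mlockall() hunk above stops overwriting mm->def_flags and instead sets or clears only VM_LOCKED, so unrelated default flags survive an mlockall() call. A small demonstration of the difference; VM_OTHERFLAG and the bit values are made up for illustration:

        #include <stdio.h>

        #define VM_LOCKED     0x00002000UL  /* illustrative bit value              */
        #define VM_OTHERFLAG  0x00000100UL  /* stands for unrelated def_flags bits */

        int main(void)
        {
                unsigned long def_flags = VM_OTHERFLAG;  /* something already set    */
                int mcl_future = 0;                      /* MCL_FUTURE not requested */

                /* Old behaviour: overwrite, losing VM_OTHERFLAG. */
                unsigned long clobbered = mcl_future ? VM_LOCKED : 0;

                /* New behaviour: touch only VM_LOCKED. */
                if (mcl_future)
                        def_flags |= VM_LOCKED;
                else
                        def_flags &= ~VM_LOCKED;

                printf("overwrite: %#lx, masked update: %#lx\n", clobbered, def_flags);
                /* prints: overwrite: 0, masked update: 0x100 */
                return 0;
        }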
*   * We can take all the locks in random order because the VM code - * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never + * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never   * takes more than one of them in a row. Secondly we're protected   * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.   * diff --git a/mm/page_alloc.c b/mm/page_alloc.c index df2022ff0c8..6a83cd35cfd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page)  	set_pageblock_migratetype(page, MIGRATE_CMA);  	__free_pages(page, pageblock_order);  	totalram_pages += pageblock_nr_pages; +#ifdef CONFIG_HIGHMEM +	if (PageHighMem(page)) +		totalhigh_pages += pageblock_nr_pages; +#endif  }  #endif @@ -4416,10 +4420,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,   * round what is now in bits to nearest long in bits, then return it in   * bytes.   */ -static unsigned long __init usemap_size(unsigned long zonesize) +static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)  {  	unsigned long usemapsize; +	zonesize += zone_start_pfn & (pageblock_nr_pages-1);  	usemapsize = roundup(zonesize, pageblock_nr_pages);  	usemapsize = usemapsize >> pageblock_order;  	usemapsize *= NR_PAGEBLOCK_BITS; @@ -4429,17 +4434,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)  }  static void __init setup_usemap(struct pglist_data *pgdat, -				struct zone *zone, unsigned long zonesize) +				struct zone *zone, +				unsigned long zone_start_pfn, +				unsigned long zonesize)  { -	unsigned long usemapsize = usemap_size(zonesize); +	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);  	zone->pageblock_flags = NULL;  	if (usemapsize)  		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,  								   usemapsize);  }  #else -static inline void setup_usemap(struct pglist_data *pgdat, -				struct zone *zone, unsigned long zonesize) {} +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, +				unsigned long zone_start_pfn, unsigned long zonesize) {}  #endif /* CONFIG_SPARSEMEM */  #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -4590,7 +4597,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,  			continue;  		set_pageblock_order(); -		setup_usemap(pgdat, zone, size); +		setup_usemap(pgdat, zone, zone_start_pfn, size);  		ret = init_currently_empty_zone(zone, zone_start_pfn,  						size, MEMMAP_EARLY);  		BUG_ON(ret); diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 8e1d89d2b1c..553921511e4 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,  	/* this is an hash collision with the temporary selected node. 
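The usemap_size() change above sizes the pageblock-flags bitmap from the zone's start pfn as well as its length, since a zone that does not begin on a pageblock boundary spans one extra pageblock. A stand-alone rendition of that arithmetic with illustrative numbers:

        #include <stdio.h>

        #define ROUNDUP(x, y)       ((((x) + (y) - 1) / (y)) * (y))
        #define NR_PAGEBLOCK_BITS   4UL     /* flag bits tracked per pageblock */

        /* Mirrors the patched usemap_size(): account for a misaligned zone start. */
        static unsigned long usemap_bytes(unsigned long zone_start_pfn,
                                          unsigned long zonesize,
                                          unsigned long pageblock_nr_pages)
        {
                unsigned long usemapsize;

                zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
                usemapsize = ROUNDUP(zonesize, pageblock_nr_pages);
                usemapsize /= pageblock_nr_pages;       /* pageblocks spanned */
                usemapsize *= NR_PAGEBLOCK_BITS;
                usemapsize = ROUNDUP(usemapsize, 8 * sizeof(unsigned long));
                return usemapsize / 8;
        }

        int main(void)
        {
                /* Illustrative numbers: 512-page pageblocks, zone starting at pfn 100. */
                printf("aligned   : %lu bytes\n", usemap_bytes(0,   8192, 512));
                printf("misaligned: %lu bytes\n", usemap_bytes(100, 8192, 512));
                /* on an LP64 system prints 8 vs 16: one extra pageblock is covered */
                return 0;
        }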
Choose  	 * the one with the lowest address  	 */ -	if ((tmp_max == max) && +	if ((tmp_max == max) && max_orig_node &&  	    (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))  		goto out; @@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,  	struct arphdr *arphdr;  	struct ethhdr *ethhdr;  	__be32 ip_src, ip_dst; +	uint8_t *hw_src, *hw_dst;  	uint16_t type = 0;  	/* pull the ethernet header */ @@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,  	ip_src = batadv_arp_ip_src(skb, hdr_size);  	ip_dst = batadv_arp_ip_dst(skb, hdr_size);  	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) || -	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst)) +	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) || +	    ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) || +	    ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))  		goto out; +	hw_src = batadv_arp_hw_src(skb, hdr_size); +	if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src)) +		goto out; + +	/* we don't care about the destination MAC address in ARP requests */ +	if (arphdr->ar_op != htons(ARPOP_REQUEST)) { +		hw_dst = batadv_arp_hw_dst(skb, hdr_size); +		if (is_zero_ether_addr(hw_dst) || +		    is_multicast_ether_addr(hw_dst)) +			goto out; +	} +  	type = ntohs(arphdr->ar_op);  out:  	return type; @@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,  	 */  	ret = !batadv_is_my_client(bat_priv, hw_dst);  out: +	if (ret) +		kfree_skb(skb);  	/* if ret == false -> packet has to be delivered to the interface */  	return ret;  } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 25bfce0666e..4925a02ae7e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)  	__u8 reason = hci_proto_disconn_ind(conn);  	switch (conn->type) { -	case ACL_LINK: -		hci_acl_disconn(conn, reason); -		break;  	case AMP_LINK:  		hci_amp_disconn(conn, reason);  		break; +	default: +		hci_acl_disconn(conn, reason); +		break;  	}  } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 596660d37c5..0f78e34220c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2810,14 +2810,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)  	if (conn) {  		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); -		hci_dev_lock(hdev); -		if (test_bit(HCI_MGMT, &hdev->dev_flags) && -		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) -			mgmt_device_connected(hdev, &conn->dst, conn->type, -					      conn->dst_type, 0, NULL, 0, -					      conn->dev_class); -		hci_dev_unlock(hdev); -  		/* Send to upper protocol */  		l2cap_recv_acldata(conn, skb, flags);  		return; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 705078a0cc3..81b44481d0d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2688,7 +2688,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)  	if (ev->opcode != HCI_OP_NOP)  		del_timer(&hdev->cmd_timer); -	if (ev->ncmd) { +	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {  		atomic_set(&hdev->cmd_cnt, 1);  		if (!skb_queue_empty(&hdev->cmd_q))  			queue_work(hdev->workqueue, &hdev->cmd_work); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index b2bcbe2dc32..a7352ff3fd1 100644 --- a/net/bluetooth/hidp/core.c +++ 
b/net/bluetooth/hidp/core.c @@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,  	hid->version = req->version;  	hid->country = req->country; -	strncpy(hid->name, req->name, 128); +	strncpy(hid->name, req->name, sizeof(req->name) - 1);  	snprintf(hid->phys, sizeof(hid->phys), "%pMR",  		 &bt_sk(session->ctrl_sock->sk)->src); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 2c78208d793..22e65832284 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -3727,6 +3727,17 @@ sendresp:  static int l2cap_connect_req(struct l2cap_conn *conn,  			     struct l2cap_cmd_hdr *cmd, u8 *data)  { +	struct hci_dev *hdev = conn->hcon->hdev; +	struct hci_conn *hcon = conn->hcon; + +	hci_dev_lock(hdev); +	if (test_bit(HCI_MGMT, &hdev->dev_flags) && +	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) +		mgmt_device_connected(hdev, &hcon->dst, hcon->type, +				      hcon->dst_type, 0, NULL, 0, +				      hcon->dev_class); +	hci_dev_unlock(hdev); +  	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);  	return 0;  } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 531a93d613d..57f250c20e3 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)  	case BT_CONNECTED:  	case BT_CONFIG: -		if (sco_pi(sk)->conn) { +		if (sco_pi(sk)->conn->hcon) {  			sk->sk_state = BT_DISCONN;  			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);  			hci_conn_put(sco_pi(sk)->conn->hcon); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 68a9587c969..5abefb12891 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)  	skb_pull(skb, sizeof(code)); +	/* +	 * The SMP context must be initialized for all other PDUs except +	 * pairing and security requests. If we get any other PDU when +	 * not initialized simply disconnect (done if this function +	 * returns an error). +	 */ +	if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && +	    !conn->smp_chan) { +		BT_ERR("Unexpected SMP command 0x%02x. 
Disconnecting.", code); +		kfree_skb(skb); +		return -ENOTSUPP; +	} +  	switch (code) {  	case SMP_CMD_PAIRING_REQ:  		reason = smp_cmd_pairing_req(conn, skb); diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 7f884e3fb95..8660ea3be70 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -16,6 +16,7 @@  #include <linux/etherdevice.h>  #include <linux/llc.h>  #include <linux/slab.h> +#include <linux/pkt_sched.h>  #include <net/net_namespace.h>  #include <net/llc.h>  #include <net/llc_pdu.h> @@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,  	skb->dev = p->dev;  	skb->protocol = htons(ETH_P_802_2); +	skb->priority = TC_PRIO_CONTROL;  	skb_reserve(skb, LLC_RESERVE);  	memcpy(__skb_put(skb, length), data, length); diff --git a/net/core/datagram.c b/net/core/datagram.c index 0337e2b7686..368f9c3f9dc 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,  		skb_queue_walk(queue, skb) {  			*peeked = skb->peeked;  			if (flags & MSG_PEEK) { -				if (*off >= skb->len) { +				if (*off >= skb->len && skb->len) {  					*off -= skb->len;  					continue;  				} diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b29dacf900f..e6e1cbe863f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,  			return -EFAULT;  		i += len;  		mutex_lock(&pktgen_thread_lock); -		pktgen_add_device(t, f); +		ret = pktgen_add_device(t, f);  		mutex_unlock(&pktgen_thread_lock); -		ret = count; -		sprintf(pg_result, "OK: add_device=%s", f); +		if (!ret) { +			ret = count; +			sprintf(pg_result, "OK: add_device=%s", f); +		} else +			sprintf(pg_result, "ERROR: can not add device %s", f);  		goto out;  	} diff --git a/net/core/request_sock.c b/net/core/request_sock.c index c31d9e8668c..4425148d2b5 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,  	struct fastopen_queue *fastopenq =  	    inet_csk(lsk)->icsk_accept_queue.fastopenq; -	BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk)); -  	tcp_sk(sk)->fastopen_rsk = NULL;  	spin_lock_bh(&fastopenq->lock);  	fastopenq->qlen--; diff --git a/net/core/scm.c b/net/core/scm.c index 57fb1ee6649..905dcc6ad1e 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -35,6 +35,7 @@  #include <net/sock.h>  #include <net/compat.h>  #include <net/scm.h> +#include <net/cls_cgroup.h>  /* @@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)  		}  		/* Bump the usage count and install the file. 
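The hidp_setup_hid() hunk above bounds the strncpy() below the full buffer size, the usual way to guarantee that a terminating NUL survives when the source string is as long as the destination. A minimal demonstration with made-up buffers:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char src[8] = "ABCDEFG";        /* 7 characters plus NUL */
                char dst[4] = { 0 };

                strncpy(dst, src, sizeof(dst) - 1);     /* leaves dst[3] == '\0' */
                printf("dst=\"%s\"\n", dst);            /* prints "ABC"          */
                return 0;
        }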
*/  		sock = sock_from_file(fp[i], &err); -		if (sock) +		if (sock) {  			sock_update_netprioidx(sock->sk, current); +			sock_update_classid(sock->sk, current); +		}  		fd_install(new_fd, get_file(fp[i]));  	} diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3ab989b0de4..32443ebc3e8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)  	new->network_header	= old->network_header;  	new->mac_header		= old->mac_header;  	new->inner_transport_header = old->inner_transport_header; -	new->inner_network_header = old->inner_transport_header; +	new->inner_network_header = old->inner_network_header;  	skb_dst_copy(new, old);  	new->rxhash		= old->rxhash;  	new->ooo_okay		= old->ooo_okay; @@ -1649,7 +1649,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)  static struct page *linear_to_page(struct page *page, unsigned int *len,  				   unsigned int *offset, -				   struct sk_buff *skb, struct sock *sk) +				   struct sock *sk)  {  	struct page_frag *pfrag = sk_page_frag(sk); @@ -1682,14 +1682,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,  static bool spd_fill_page(struct splice_pipe_desc *spd,  			  struct pipe_inode_info *pipe, struct page *page,  			  unsigned int *len, unsigned int offset, -			  struct sk_buff *skb, bool linear, +			  bool linear,  			  struct sock *sk)  {  	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))  		return true;  	if (linear) { -		page = linear_to_page(page, len, &offset, skb, sk); +		page = linear_to_page(page, len, &offset, sk);  		if (!page)  			return true;  	} @@ -1706,23 +1706,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,  	return false;  } -static inline void __segment_seek(struct page **page, unsigned int *poff, -				  unsigned int *plen, unsigned int off) -{ -	unsigned long n; - -	*poff += off; -	n = *poff / PAGE_SIZE; -	if (n) -		*page = nth_page(*page, n); - -	*poff = *poff % PAGE_SIZE; -	*plen -= off; -} -  static bool __splice_segment(struct page *page, unsigned int poff,  			     unsigned int plen, unsigned int *off, -			     unsigned int *len, struct sk_buff *skb, +			     unsigned int *len,  			     struct splice_pipe_desc *spd, bool linear,  			     struct sock *sk,  			     struct pipe_inode_info *pipe) @@ -1737,23 +1723,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,  	}  	/* ignore any bits we already processed */ -	if (*off) { -		__segment_seek(&page, &poff, &plen, *off); -		*off = 0; -	} +	poff += *off; +	plen -= *off; +	*off = 0;  	do {  		unsigned int flen = min(*len, plen); -		/* the linear region may spread across several pages  */ -		flen = min_t(unsigned int, flen, PAGE_SIZE - poff); - -		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) +		if (spd_fill_page(spd, pipe, page, &flen, poff, +				  linear, sk))  			return true; - -		__segment_seek(&page, &poff, &plen, flen); +		poff += flen; +		plen -= flen;  		*len -= flen; -  	} while (*len && plen);  	return false; @@ -1777,7 +1759,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,  	if (__splice_segment(virt_to_page(skb->data),  			     (unsigned long) skb->data & (PAGE_SIZE - 1),  			     skb_headlen(skb), -			     offset, len, skb, spd, +			     offset, len, spd,  			     skb_head_is_locked(skb),  			     sk, pipe))  		return true; @@ -1790,7 +1772,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,  		if 
(__splice_segment(skb_frag_page(f),  				     f->page_offset, skb_frag_size(f), -				     offset, len, skb, spd, false, sk, pipe)) +				     offset, len, spd, false, sk, pipe))  			return true;  	} diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index a0d8392491c..a69b4e4a02b 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)  	skb->network_header += ah_hlen;  	memcpy(skb_network_header(skb), work_iph, ihl);  	__skb_pull(skb, ah_hlen + ihl); -	skb_set_transport_header(skb, -ihl); + +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -ihl);  out:  	kfree(AH_SKB_CB(skb)->tmp);  	xfrm_input_resume(skb, err); @@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)  	skb->network_header += ah_hlen;  	memcpy(skb_network_header(skb), work_iph, ihl);  	__skb_pull(skb, ah_hlen + ihl); -	skb_set_transport_header(skb, -ihl); +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -ihl);  	err = nexthdr; @@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)  	if (!x)  		return; -	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) +	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { +		atomic_inc(&flow_cache_genid); +		rt_genid_bump(net); +  		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); -	else +	} else  		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);  	xfrm_state_put(x);  } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 9547a273b9e..ded146b217f 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)  static int arp_rcv(struct sk_buff *skb, struct net_device *dev,  		   struct packet_type *pt, struct net_device *orig_dev)  { -	struct arphdr *arp; +	const struct arphdr *arp; + +	if (dev->flags & IFF_NOARP || +	    skb->pkt_type == PACKET_OTHERHOST || +	    skb->pkt_type == PACKET_LOOPBACK) +		goto freeskb; + +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (!skb) +		goto out_of_mem;  	/* ARP header, plus 2 device addresses, plus 2 IP addresses.  */  	if (!pskb_may_pull(skb, arp_hdr_len(dev)))  		goto freeskb;  	arp = arp_hdr(skb); -	if (arp->ar_hln != dev->addr_len || -	    dev->flags & IFF_NOARP || -	    skb->pkt_type == PACKET_OTHERHOST || -	    skb->pkt_type == PACKET_LOOPBACK || -	    arp->ar_pln != 4) +	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)  		goto freeskb; -	skb = skb_share_check(skb, GFP_ATOMIC); -	if (skb == NULL) -		goto out_of_mem; -  	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));  	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 424fafbc8cb..b28e863fe0a 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -85,3 +85,28 @@ out:  	return err;  }  EXPORT_SYMBOL(ip4_datagram_connect); + +void ip4_datagram_release_cb(struct sock *sk) +{ +	const struct inet_sock *inet = inet_sk(sk); +	const struct ip_options_rcu *inet_opt; +	__be32 daddr = inet->inet_daddr; +	struct flowi4 fl4; +	struct rtable *rt; + +	if (! 
__sk_dst_get(sk) || __sk_dst_check(sk, 0)) +		return; + +	rcu_read_lock(); +	inet_opt = rcu_dereference(inet->inet_opt); +	if (inet_opt && inet_opt->opt.srr) +		daddr = inet_opt->opt.faddr; +	rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, +				   inet->inet_saddr, inet->inet_dport, +				   inet->inet_sport, sk->sk_protocol, +				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); +	if (!IS_ERR(rt)) +		__sk_dst_set(sk, &rt->dst); +	rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ip4_datagram_release_cb); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index b61e9deb7c7..3b4f0cd2e63 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)  	pskb_trim(skb, skb->len - alen - padlen - 2);  	__skb_pull(skb, hlen); -	skb_set_transport_header(skb, -ihl); +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -ihl);  	err = nexthdr[1]; @@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)  	if (!x)  		return; -	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) +	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { +		atomic_inc(&flow_cache_genid); +		rt_genid_bump(net); +  		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); -	else +	} else  		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);  	xfrm_state_put(x);  } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 303012adf9e..e81b1caf2ea 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -963,8 +963,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev  			ptr--;  		}  		if (tunnel->parms.o_flags&GRE_CSUM) { +			int offset = skb_transport_offset(skb); +  			*ptr = 0; -			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); +			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset, +								 skb->len - offset, +								 0));  		}  	} diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index d3ab47e19a8..9a46daed2f3 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)  	if (!x)  		return; -	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) +	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { +		atomic_inc(&flow_cache_genid); +		rt_genid_bump(net); +  		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); -	else +	} else  		ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);  	xfrm_state_put(x);  } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8f3d05424a3..6f9c07268cf 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -738,6 +738,7 @@ struct proto ping_prot = {  	.recvmsg =	ping_recvmsg,  	.bind =		ping_bind,  	.backlog_rcv =	ping_queue_rcv_skb, +	.release_cb =	ip4_datagram_release_cb,  	.hash =		ping_v4_hash,  	.unhash =	ping_v4_unhash,  	.get_port =	ping_v4_get_port, diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 73d1e4df4bf..6f08991409c 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -894,6 +894,7 @@ struct proto raw_prot = {  	.recvmsg	   = raw_recvmsg,  	.bind		   = raw_bind,  	.backlog_rcv	   = raw_rcv_skb, +	.release_cb	   = ip4_datagram_release_cb,  	.hash		   = raw_hash_sk,  	.unhash		   = raw_unhash_sk,  	.obj_size	   = sizeof(struct raw_sock), diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 844a9ef60db..a0fcc47fee7 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)  	struct dst_entry *dst = &rt->dst;  	struct fib_result res; +	if 
(dst_metric_locked(dst, RTAX_MTU)) +		return; +  	if (dst->dev->mtu < mtu)  		return; @@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,  }  EXPORT_SYMBOL_GPL(ipv4_update_pmtu); -void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) +static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)  {  	const struct iphdr *iph = (const struct iphdr *) skb->data;  	struct flowi4 fl4; @@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)  		ip_rt_put(rt);  	}  } + +void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) +{ +	const struct iphdr *iph = (const struct iphdr *) skb->data; +	struct flowi4 fl4; +	struct rtable *rt; +	struct dst_entry *dst; +	bool new = false; + +	bh_lock_sock(sk); +	rt = (struct rtable *) __sk_dst_get(sk); + +	if (sock_owned_by_user(sk) || !rt) { +		__ipv4_sk_update_pmtu(skb, sk, mtu); +		goto out; +	} + +	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); + +	if (!__sk_dst_check(sk, 0)) { +		rt = ip_route_output_flow(sock_net(sk), &fl4, sk); +		if (IS_ERR(rt)) +			goto out; + +		new = true; +	} + +	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); + +	dst = dst_check(&rt->dst, 0); +	if (!dst) { +		if (new) +			dst_release(&rt->dst); + +		rt = ip_route_output_flow(sock_net(sk), &fl4, sk); +		if (IS_ERR(rt)) +			goto out; + +		new = true; +	} + +	if (new) +		__sk_dst_set(sk, &rt->dst); + +out: +	bh_unlock_sock(sk); +}  EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);  void ipv4_redirect(struct sk_buff *skb, struct net *net, @@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)  	if (!mtu || time_after_eq(jiffies, rt->dst.expires))  		mtu = dst_metric_raw(dst, RTAX_MTU); -	if (mtu && rt_is_output_route(rt)) +	if (mtu)  		return mtu;  	mtu = dst->dev->mtu; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 291f2ed7cc3..cdf2e707bb1 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)  {  	int cnt; /* increase in packets */  	unsigned int delta = 0; +	u32 snd_cwnd = tp->snd_cwnd; + +	if (unlikely(!snd_cwnd)) { +		pr_err_once("snd_cwnd is nul, please report this bug.\n"); +		snd_cwnd = 1U; +	}  	/* RFC3465: ABC Slow start  	 * Increase only after a full MSS of bytes is acked @@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)  	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)  		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */  	else -		cnt = tp->snd_cwnd;			/* exponential increase */ +		cnt = snd_cwnd;				/* exponential increase */  	/* RFC3465: ABC  	 * We MAY increase by 2 if discovered delayed ack @@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)  	tp->bytes_acked = 0;  	tp->snd_cwnd_cnt += cnt; -	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { -		tp->snd_cwnd_cnt -= tp->snd_cwnd; +	while (tp->snd_cwnd_cnt >= snd_cwnd) { +		tp->snd_cwnd_cnt -= snd_cwnd;  		delta++;  	} -	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); +	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);  }  EXPORT_SYMBOL_GPL(tcp_slow_start); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18f97ca76b0..ad70a962c20 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)  		}  	} else {  		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { +			if (!tcp_packets_in_flight(tp)) { +				
tcp_enter_frto_loss(sk, 2, flag); +				return true; +			} +  			/* Prevent sending of new data. */  			tp->snd_cwnd = min(tp->snd_cwnd,  					   tcp_packets_in_flight(tp)); @@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,  	 * the remote receives only the retransmitted (regular) SYNs: either  	 * the original SYN-data or the corresponding SYN-ACK is lost.  	 */ -	syn_drop = (cookie->len <= 0 && data && -		    inet_csk(sk)->icsk_retransmits); +	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);  	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 54139fa514e..eadb693eef5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)  	 * We do take care of PMTU discovery (RFC1191) special case :  	 * we can receive locally generated ICMP messages while socket is held.  	 */ -	if (sock_owned_by_user(sk) && -	    type != ICMP_DEST_UNREACH && -	    code != ICMP_FRAG_NEEDED) -		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); - +	if (sock_owned_by_user(sk)) { +		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) +			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); +	}  	if (sk->sk_state == TCP_CLOSE)  		goto out; @@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)  		 * errors returned from accept().  		 */  		inet_csk_reqsk_queue_drop(sk, req, prev); +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  		goto out;  	case TCP_SYN_SENT: @@ -1501,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)  	 * clogging syn queue with openreqs with exponentially increasing  	 * timeout.  	 */ -	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) +	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);  		goto drop; +	}  	req = inet_reqsk_alloc(&tcp_request_sock_ops);  	if (!req) @@ -1667,6 +1669,7 @@ drop_and_release:  drop_and_free:  	reqsk_free(req);  drop: +	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  	return 0;  }  EXPORT_SYMBOL(tcp_v4_conn_request); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 79c8dbe59b5..1f4d405eafb 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1952,6 +1952,7 @@ struct proto udp_prot = {  	.recvmsg	   = udp_recvmsg,  	.sendpage	   = udp_sendpage,  	.backlog_rcv	   = __udp_queue_rcv_skb, +	.release_cb	   = ip4_datagram_release_cb,  	.hash		   = udp_lib_hash,  	.unhash		   = udp_lib_unhash,  	.rehash		   = udp_v4_rehash, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 420e5632638..1b5d8cb9b12 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)  	if (dev->addr_len != IEEE802154_ADDR_LEN)  		return -1;  	memcpy(eui, dev->dev_addr, 8); +	eui[0] ^= 2;  	return 0;  } diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index ecc35b93314..384233188ac 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)  	skb->network_header += ah_hlen;  	memcpy(skb_network_header(skb), work_iph, hdr_len);  	__skb_pull(skb, ah_hlen + hdr_len); -	skb_set_transport_header(skb, -hdr_len); +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -hdr_len);  out:  	kfree(AH_SKB_CB(skb)->tmp);  	
xfrm_input_resume(skb, err); @@ -593,9 +596,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)  	skb->network_header += ah_hlen;  	memcpy(skb_network_header(skb), work_iph, hdr_len); -	skb->transport_header = skb->network_header;  	__skb_pull(skb, ah_hlen + hdr_len); +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -hdr_len); +  	err = nexthdr;  out_free: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8edf2601065..7a778b9a7b8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)  		if (skb->protocol == htons(ETH_P_IPV6)) {  			sin->sin6_addr = ipv6_hdr(skb)->saddr;  			if (np->rxopt.all) -				datagram_recv_ctl(sk, msg, skb); +				ip6_datagram_recv_ctl(sk, msg, skb);  			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)  				sin->sin6_scope_id = IP6CB(skb)->iif;  		} else { @@ -468,7 +468,8 @@ out:  } -int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) +int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, +			  struct sk_buff *skb)  {  	struct ipv6_pinfo *np = inet6_sk(sk);  	struct inet6_skb_parm *opt = IP6CB(skb); @@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)  	}  	return 0;  } +EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); -int datagram_send_ctl(struct net *net, struct sock *sk, -		      struct msghdr *msg, struct flowi6 *fl6, -		      struct ipv6_txoptions *opt, -		      int *hlimit, int *tclass, int *dontfrag) +int ip6_datagram_send_ctl(struct net *net, struct sock *sk, +			  struct msghdr *msg, struct flowi6 *fl6, +			  struct ipv6_txoptions *opt, +			  int *hlimit, int *tclass, int *dontfrag)  {  	struct in6_pktinfo *src_info;  	struct cmsghdr *cmsg; @@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,  exit_f:  	return err;  } -EXPORT_SYMBOL_GPL(datagram_send_ctl); +EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 282f3723ee1..40ffd72243a 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)  	pskb_trim(skb, skb->len - alen - padlen - 2);  	__skb_pull(skb, hlen); -	skb_set_transport_header(skb, -hdr_len); +	if (x->props.mode == XFRM_MODE_TUNNEL) +		skb_reset_transport_header(skb); +	else +		skb_set_transport_header(skb, -hdr_len);  	err = nexthdr[1]; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b4a9fd51dae..fff5bdd8b68 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)  	return net->ipv6.icmp_sk[smp_processor_id()];  } +static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, +		       u8 type, u8 code, int offset, __be32 info) +{ +	struct net *net = dev_net(skb->dev); + +	if (type == ICMPV6_PKT_TOOBIG) +		ip6_update_pmtu(skb, net, info, 0, 0); +	else if (type == NDISC_REDIRECT) +		ip6_redirect(skb, net, 0, 0); +} +  static int icmpv6_rcv(struct sk_buff *skb);  static const struct inet6_protocol icmpv6_protocol = {  	.handler	=	icmpv6_rcv, +	.err_handler	=	icmpv6_err,  	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,  }; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 29124b7a04c..d6de4b44725 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct 
in6_flowlabel_req *freq,  		msg.msg_control = (void*)(fl->opt+1);  		memset(&flowi6, 0, sizeof(flowi6)); -		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, -					&junk, &junk); +		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, +					    &junk, &junk, &junk);  		if (err)  			goto done;  		err = -EINVAL; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c727e471275..131dd097736 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,  	int ret;  	if (!ip6_tnl_xmit_ctl(t)) -		return -1; +		goto tx_err;  	switch (skb->protocol) {  	case htons(ETH_P_IP): diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 5552d13ae92..0c7c03d50dc 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1213,10 +1213,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,  		if (dst_allfrag(rt->dst.path))  			cork->flags |= IPCORK_ALLFRAG;  		cork->length = 0; -		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; +		exthdrlen = (opt ? opt->opt_flen : 0);  		length += exthdrlen;  		transhdrlen += exthdrlen; -		dst_exthdrlen = rt->dst.header_len; +		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;  	} else {  		rt = (struct rt6_info *)cork->dst;  		fl6 = &inet->cork.fl.u.ip6; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 26dcdec9e3a..8fd154e5f07 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1710,6 +1710,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns  			return -EINVAL;  		if (get_user(v, (u32 __user *)optval))  			return -EFAULT; +		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ +		if (v != RT_TABLE_DEFAULT && v >= 100000000) +			return -EINVAL;  		if (sk == mrt->mroute6_sk)  			return -EBUSY; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ee94d31c9d4..d1e2e8ef29c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -476,8 +476,8 @@ sticky_done:  		msg.msg_controllen = optlen;  		msg.msg_control = (void*)(opt+1); -		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, -					 &junk); +		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, +					     &junk, &junk);  		if (retv)  			goto done;  update: @@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,  		release_sock(sk);  		if (skb) { -			int err = datagram_recv_ctl(sk, &msg, skb); +			int err = ip6_datagram_recv_ctl(sk, &msg, skb);  			kfree_skb(skb);  			if (err)  				return err; diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 7302b0b7b64..83acc1405a1 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -9,6 +9,7 @@  #include <linux/module.h>  #include <linux/skbuff.h>  #include <linux/ipv6.h> +#include <net/ipv6.h>  #include <linux/netfilter.h>  #include <linux/netfilter_ipv6.h>  #include <linux/netfilter_ipv6/ip6t_NPT.h> @@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)  {  	struct ip6t_npt_tginfo *npt = par->targinfo;  	__wsum src_sum = 0, dst_sum = 0; +	struct in6_addr pfx;  	unsigned int i;  	if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)  		return -EINVAL; +	/* Ensure that LSB of prefix is zero */ +	ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len); +	if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6)) +		return -EINVAL; +	ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len); +	if 
(!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6)) +		return -EINVAL; +  	for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {  		src_sum = csum_add(src_sum,  				(__force __wsum)npt->src_pfx.in6.s6_addr16[i]); @@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)  				(__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);  	} -	npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum); +	npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));  	return 0;  } @@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,  		idx = i / 32;  		addr->s6_addr32[idx] &= mask; -		addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx]; +		addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];  	}  	if (pfx_len <= 48) @@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,  			return false;  	} -	sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx], -			 npt->adjustment); +	sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]), +				  csum_unfold(npt->adjustment)));  	if (sum == CSUM_MANGLED_0)  		sum = 0;  	*(__force __sum16 *)&addr->s6_addr16[idx] = sum; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6cd29b1e8b9..70fa8144999 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,  	sock_recv_ts_and_drops(msg, sk, skb);  	if (np->rxopt.all) -		datagram_recv_ctl(sk, msg, skb); +		ip6_datagram_recv_ctl(sk, msg, skb);  	err = copied;  	if (flags & MSG_TRUNC) @@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(struct ipv6_txoptions); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e229a3bc345..363d8b7772e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -928,7 +928,7 @@ restart:  	dst_hold(&rt->dst);  	read_unlock_bh(&table->tb6_lock); -	if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) +	if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))  		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);  	else if (!(rt->dst.flags & DST_HOST))  		nrt = rt6_alloc_clone(rt, &fl6->daddr); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 93825dd3a7c..4f43537197e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,  		}  		inet_csk_reqsk_queue_drop(sk, req, prev); +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  		goto out;  	case TCP_SYN_SENT: @@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)  			goto drop;  	} -	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) +	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { +		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);  		goto drop; +	}  	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);  	if (req == NULL) @@ -1108,6 +1111,7 @@ drop_and_release:  drop_and_free:  	reqsk_free(req);  drop: +	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);  	return 0; /* don't send reset */  } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index dfaa29b8b29..fb083295ff0 100644 --- 
a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -443,7 +443,7 @@ try_again:  			ip_cmsg_recv(msg, skb);  	} else {  		if (np->rxopt.all) -			datagram_recv_ctl(sk, msg, skb); +			ip6_datagram_recv_ctl(sk, msg, skb);  	}  	err = copied; @@ -1153,8 +1153,8 @@ do_udp_sendmsg:  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(*opt); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1a9f3723c13..2ac884d0e89 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)  } +/* Lookup the tunnel socket, possibly involving the fs code if the socket is + * owned by userspace.  A struct sock returned from this function must be + * released using l2tp_tunnel_sock_put once you're done with it. + */ +struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) +{ +	int err = 0; +	struct socket *sock = NULL; +	struct sock *sk = NULL; + +	if (!tunnel) +		goto out; + +	if (tunnel->fd >= 0) { +		/* Socket is owned by userspace, who might be in the process +		 * of closing it.  Look the socket up using the fd to ensure +		 * consistency. +		 */ +		sock = sockfd_lookup(tunnel->fd, &err); +		if (sock) +			sk = sock->sk; +	} else { +		/* Socket is owned by kernelspace */ +		sk = tunnel->sock; +	} + +out: +	return sk; +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); + +/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ +void l2tp_tunnel_sock_put(struct sock *sk) +{ +	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); +	if (tunnel) { +		if (tunnel->fd >= 0) { +			/* Socket is owned by userspace */ +			sockfd_put(sk->sk_socket); +		} +		sock_put(sk); +	} +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); +  /* Lookup a session by id in the global session list   */  static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) @@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len  	struct udphdr *uh;  	struct inet_sock *inet;  	__wsum csum; -	int old_headroom; -	int new_headroom;  	int headroom;  	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;  	int udp_len; @@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len  	 */  	headroom = NET_SKB_PAD + sizeof(struct iphdr) +  		uhlen + hdr_len; -	old_headroom = skb_headroom(skb);  	if (skb_cow_head(skb, headroom)) {  		kfree_skb(skb);  		return NET_XMIT_DROP;  	} -	new_headroom = skb_headroom(skb);  	skb_orphan(skb); -	skb->truesize += new_headroom - old_headroom; -  	/* Setup L2TP header */  	session->build_header(session, __skb_push(skb, hdr_len)); @@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32  	tunnel->old_sk_destruct = sk->sk_destruct;  	sk->sk_destruct = &l2tp_tunnel_destruct;  	tunnel->sock = sk; +	tunnel->fd = fd;  	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");  	sk->sk_allocation = GFP_ATOMIC; @@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);   */  int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)  { -	int err = 0; -	struct socket *sock = tunnel->sock ? 
tunnel->sock->sk_socket : NULL; +	int err = -EBADF; +	struct socket *sock = NULL; +	struct sock *sk = NULL; + +	sk = l2tp_tunnel_sock_lookup(tunnel); +	if (!sk) +		goto out; + +	sock = sk->sk_socket; +	BUG_ON(!sock);  	/* Force the tunnel socket to close. This will eventually  	 * cause the tunnel to be deleted via the normal socket close  	 * mechanisms when userspace closes the tunnel socket.  	 */ -	if (sock != NULL) { -		err = inet_shutdown(sock, 2); +	err = inet_shutdown(sock, 2); -		/* If the tunnel's socket was created by the kernel, -		 * close the socket here since the socket was not -		 * created by userspace. -		 */ -		if (sock->file == NULL) -			err = inet_release(sock); -	} +	/* If the tunnel's socket was created by the kernel, +	 * close the socket here since the socket was not +	 * created by userspace. +	 */ +	if (sock->file == NULL) +		err = inet_release(sock); +	l2tp_tunnel_sock_put(sk); +out:  	return err;  }  EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 56d583e083a..e62204cad4f 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -188,7 +188,8 @@ struct l2tp_tunnel {  	int (*recv_payload_hook)(struct sk_buff *skb);  	void (*old_sk_destruct)(struct sock *);  	struct sock		*sock;		/* Parent socket */ -	int			fd; +	int			fd;		/* Parent fd, if tunnel socket +						 * was created by userspace */  	uint8_t			priv[0];	/* private data */  }; @@ -228,6 +229,8 @@ out:  	return tunnel;  } +extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); +extern void l2tp_tunnel_sock_put(struct sock *sk);  extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);  extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);  extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 927547171bc..8ee4a86ae99 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,  		memset(opt, 0, sizeof(struct ipv6_txoptions));  		opt->tot_len = sizeof(struct ipv6_txoptions); -		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, -					&hlimit, &tclass, &dontfrag); +		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, +					    &hlimit, &tclass, &dontfrag);  		if (err < 0) {  			fl6_sock_release(flowlabel);  			return err; @@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  			    struct msghdr *msg, size_t len, int noblock,  			    int flags, int *addr_len)  { -	struct inet_sock *inet = inet_sk(sk); +	struct ipv6_pinfo *np = inet6_sk(sk);  	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;  	size_t copied = 0;  	int err = -EOPNOTSUPP; @@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  			lsa->l2tp_scope_id = IP6CB(skb)->iif;  	} -	if (inet->cmsg_flags) -		ip_cmsg_recv(msg, skb); +	if (np->rxopt.all) +		ip6_datagram_recv_ctl(sk, msg, skb);  	if (flags & MSG_TRUNC)  		copied = skb->len; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 286366ef893..716605c241f 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	struct l2tp_session *session;  	struct l2tp_tunnel *tunnel;  	struct pppol2tp_session *ps; -	int old_headroom; -	int new_headroom;  	int uhlen, 
headroom;  	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) @@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	if (tunnel == NULL)  		goto abort_put_sess; -	old_headroom = skb_headroom(skb);  	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;  	headroom = NET_SKB_PAD +  		   sizeof(struct iphdr) + /* IP header */ @@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  	if (skb_cow_head(skb, headroom))  		goto abort_put_sess_tun; -	new_headroom = skb_headroom(skb); -	skb->truesize += new_headroom - old_headroom; -  	/* Setup PPP header */  	__skb_push(skb, sizeof(ppph));  	skb->data[0] = ppph[0]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 47e0aca614b..0479c64aa83 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,  			sta = sta_info_get(sdata, mac_addr);  		else  			sta = sta_info_get_bss(sdata, mac_addr); -		if (!sta) { +		/* +		 * The ASSOC test makes sure the driver is ready to +		 * receive the key. When wpa_supplicant has roamed +		 * using FT, it attempts to set the key before +		 * association has completed, this rejects that attempt +		 * so it will set the key again after assocation. +		 * +		 * TODO: accept the key if we have a station entry and +		 *       add it to the device after the station. +		 */ +		if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {  			ieee80211_key_free(sdata->local, key);  			err = -ENOENT;  			goto out_unlock; @@ -1994,7 +2004,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,  {  	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); -	memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate)); +	memcpy(sdata->vif.bss_conf.mcast_rate, rate, +	       sizeof(int) * IEEE80211_NUM_BANDS);  	return 0;  } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8563b9a5cac..2ed065c0956 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1358,10 +1358,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);  void ieee80211_sched_scan_stopped_work(struct work_struct *work);  /* off-channel helpers */ -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, -				    bool offchannel_ps_enable); -void ieee80211_offchannel_return(struct ieee80211_local *local, -				 bool offchannel_ps_disable); +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); +void ieee80211_offchannel_return(struct ieee80211_local *local);  void ieee80211_roc_setup(struct ieee80211_local *local);  void ieee80211_start_next_roc(struct ieee80211_local *local);  void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 47aeee2d8db..2659e428b80 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -215,6 +215,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,  	skb->priority = 7;  	info->control.vif = &sdata->vif; +	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;  	ieee80211_set_qos_hdr(sdata, skb);  } @@ -246,11 +247,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,  		return -EAGAIN;  	skb = dev_alloc_skb(local->tx_headroom + +			    IEEE80211_ENCRYPT_HEADROOM + +			    IEEE80211_ENCRYPT_TAILROOM +  			    hdr_len +  			    2 + 15 /* PERR IE */);  	if (!skb)  		return -1; 
-	skb_reserve(skb, local->tx_headroom); +	skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);  	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);  	memset(mgmt, 0, hdr_len);  	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a3552929a21..5107248af7f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3400,6 +3400,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,  	ret = 0; +out:  	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,  					IEEE80211_CHAN_DISABLED)) {  		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { @@ -3408,14 +3409,13 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,  			goto out;  		} -		ret = chandef_downgrade(chandef); +		ret |= chandef_downgrade(chandef);  	}  	if (chandef->width != vht_chandef.width)  		sdata_info(sdata, -			   "local regulatory prevented using AP HT/VHT configuration, downgraded\n"); +			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); -out:  	WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));  	return ret;  } @@ -3529,8 +3529,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,  	 */  	ret = ieee80211_vif_use_channel(sdata, &chandef,  					IEEE80211_CHANCTX_SHARED); -	while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) +	while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {  		ifmgd->flags |= chandef_downgrade(&chandef); +		ret = ieee80211_vif_use_channel(sdata, &chandef, +						IEEE80211_CHANCTX_SHARED); +	}  	return ret;  } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index a5379aea7d0..a3ad4c3c80a 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)  	ieee80211_sta_reset_conn_monitor(sdata);  } -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, -				    bool offchannel_ps_enable) +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)  {  	struct ieee80211_sub_if_data *sdata; @@ -134,8 +133,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,  		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {  			netif_tx_stop_all_queues(sdata->dev); -			if (offchannel_ps_enable && -			    (sdata->vif.type == NL80211_IFTYPE_STATION) && +			if (sdata->vif.type == NL80211_IFTYPE_STATION &&  			    sdata->u.mgd.associated)  				ieee80211_offchannel_ps_enable(sdata);  		} @@ -143,8 +141,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,  	mutex_unlock(&local->iflist_mtx);  } -void ieee80211_offchannel_return(struct ieee80211_local *local, -				 bool offchannel_ps_disable) +void ieee80211_offchannel_return(struct ieee80211_local *local)  {  	struct ieee80211_sub_if_data *sdata; @@ -163,11 +160,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,  			continue;  		/* Tell AP we're back */ -		if (offchannel_ps_disable && -		    sdata->vif.type == NL80211_IFTYPE_STATION) { -			if (sdata->u.mgd.associated) -				ieee80211_offchannel_ps_disable(sdata); -		} +		if (sdata->vif.type == NL80211_IFTYPE_STATION && +		    sdata->u.mgd.associated) +			ieee80211_offchannel_ps_disable(sdata);  		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {  			/* @@ -385,7 +380,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)  			local->tmp_channel = NULL;  			ieee80211_hw_config(local, 0); -			ieee80211_offchannel_return(local, 
true); +			ieee80211_offchannel_return(local);  		}  		ieee80211_recalc_idle(local); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index d59fc6818b1..bf82e69d060 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -292,7 +292,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,  	if (!was_hw_scan) {  		ieee80211_configure_filter(local);  		drv_sw_scan_complete(local); -		ieee80211_offchannel_return(local, true); +		ieee80211_offchannel_return(local);  	}  	ieee80211_recalc_idle(local); @@ -341,7 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)  	local->next_scan_state = SCAN_DECISION;  	local->scan_channel_idx = 0; -	ieee80211_offchannel_stop_vifs(local, true); +	ieee80211_offchannel_stop_vifs(local);  	ieee80211_configure_filter(local); @@ -678,12 +678,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,  	local->scan_channel = NULL;  	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); -	/* -	 * Re-enable vifs and beaconing.  Leave PS -	 * in off-channel state..will put that back -	 * on-channel at the end of scanning. -	 */ -	ieee80211_offchannel_return(local, false); +	/* disable PS */ +	ieee80211_offchannel_return(local);  	*next_delay = HZ / 5;  	/* afterwards, resume scan & go to next channel */ @@ -693,8 +689,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,  static void ieee80211_scan_state_resume(struct ieee80211_local *local,  					unsigned long *next_delay)  { -	/* PS already is in off-channel mode */ -	ieee80211_offchannel_stop_vifs(local, false); +	ieee80211_offchannel_stop_vifs(local);  	if (local->ops->flush) {  		drv_flush(local, false); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e9eadc40c09..467c1d1b66f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1673,10 +1673,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,  			chanctx_conf =  				rcu_dereference(tmp_sdata->vif.chanctx_conf);  	} -	if (!chanctx_conf) -		goto fail_rcu; -	chan = chanctx_conf->def.chan; +	if (chanctx_conf) +		chan = chanctx_conf->def.chan; +	else if (!local->use_chanctx) +		chan = local->_oper_channel; +	else +		goto fail_rcu;  	/*  	 * Frame injection is not allowed if beaconing is not allowed diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 746048b13ef..ae8ec6f2768 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,  	return 1;  } +static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph, +			  unsigned int sctphoff) +{ +	__u32 crc32; +	struct sk_buff *iter; + +	crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff); +	skb_walk_frags(skb, iter) +		crc32 = sctp_update_cksum((u8 *) iter->data, +					  skb_headlen(iter), crc32); +	sctph->checksum = sctp_end_cksum(crc32); + +	skb->ip_summed = CHECKSUM_UNNECESSARY; +} +  static int  sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,  		  struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)  {  	sctp_sctphdr_t *sctph;  	unsigned int sctphoff = iph->len; -	struct sk_buff *iter; -	__be32 crc32;  #ifdef CONFIG_IP_VS_IPV6  	if (cp->af == AF_INET6 && iph->fragoffs) @@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,  	sctph = (void *) skb_network_header(skb) + sctphoff;  	sctph->source = cp->vport; -	/* Calculate the checksum */ -	
crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff); -	skb_walk_frags(skb, iter) -		crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter), -				          crc32); -	crc32 = sctp_end_cksum(crc32); -	sctph->checksum = crc32; +	sctp_nat_csum(skb, sctph, sctphoff);  	return 1;  } @@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,  {  	sctp_sctphdr_t *sctph;  	unsigned int sctphoff = iph->len; -	struct sk_buff *iter; -	__be32 crc32;  #ifdef CONFIG_IP_VS_IPV6  	if (cp->af == AF_INET6 && iph->fragoffs) @@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,  	sctph = (void *) skb_network_header(skb) + sctphoff;  	sctph->dest = cp->dport; -	/* Calculate the checksum */ -	crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff); -	skb_walk_frags(skb, iter) -		crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter), -					  crc32); -	crc32 = sctp_end_cksum(crc32); -	sctph->checksum = crc32; +	sctp_nat_csum(skb, sctph, sctphoff);  	return 1;  } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index effa10c9e4e..44fd10c539a 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)  					     GFP_KERNEL);  			if (!tinfo->buf)  				goto outtinfo; +		} else { +			tinfo->buf = NULL;  		}  		tinfo->id = id; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 016d95ead93..e4a0c4fb3a7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1376,11 +1376,12 @@ void nf_conntrack_cleanup(struct net *net)  	synchronize_net();  	nf_conntrack_proto_fini(net);  	nf_conntrack_cleanup_net(net); +} -	if (net_eq(net, &init_net)) { -		RCU_INIT_POINTER(nf_ct_destroy, NULL); -		nf_conntrack_cleanup_init_net(); -	} +void nf_conntrack_cleanup_end(void) +{ +	RCU_INIT_POINTER(nf_ct_destroy, NULL); +	nf_conntrack_cleanup_init_net();  }  void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 363285d544a..e7185c68481 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -575,6 +575,7 @@ static int __init nf_conntrack_standalone_init(void)  static void __exit nf_conntrack_standalone_fini(void)  {  	unregister_pernet_subsys(&nf_conntrack_net_ops); +	nf_conntrack_cleanup_end();  }  module_init(nf_conntrack_standalone_init); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8d987c3573f..7b3a9e5999c 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,  }  EXPORT_SYMBOL_GPL(xt_find_revision); -static char *textify_hooks(char *buf, size_t size, unsigned int mask) +static char * +textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)  { -	static const char *const names[] = { +	static const char *const inetbr_names[] = {  		"PREROUTING", "INPUT", "FORWARD",  		"OUTPUT", "POSTROUTING", "BROUTING",  	}; -	unsigned int i; +	static const char *const arp_names[] = { +		"INPUT", "FORWARD", "OUTPUT", +	}; +	const char *const *names; +	unsigned int i, max;  	char *p = buf;  	bool np = false;  	int res; +	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; +	max   = (nfproto == NFPROTO_ARP) ? 
ARRAY_SIZE(arp_names) : +	                                   ARRAY_SIZE(inetbr_names);  	*p = '\0'; -	for (i = 0; i < ARRAY_SIZE(names); ++i) { +	for (i = 0; i < max; ++i) {  		if (!(mask & (1 << i)))  			continue;  		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); @@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,  		pr_err("%s_tables: %s match: used from hooks %s, but only "  		       "valid from %s\n",  		       xt_prefix[par->family], par->match->name, -		       textify_hooks(used, sizeof(used), par->hook_mask), -		       textify_hooks(allow, sizeof(allow), par->match->hooks)); +		       textify_hooks(used, sizeof(used), par->hook_mask, +		                     par->family), +		       textify_hooks(allow, sizeof(allow), par->match->hooks, +		                     par->family));  		return -EINVAL;  	}  	if (par->match->proto && (par->match->proto != proto || inv_proto)) { @@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,  		pr_err("%s_tables: %s target: used from hooks %s, but only "  		       "usable from %s\n",  		       xt_prefix[par->family], par->target->name, -		       textify_hooks(used, sizeof(used), par->hook_mask), -		       textify_hooks(allow, sizeof(allow), par->target->hooks)); +		       textify_hooks(used, sizeof(used), par->hook_mask, +		                     par->family), +		       textify_hooks(allow, sizeof(allow), par->target->hooks, +		                     par->family));  		return -EINVAL;  	}  	if (par->target->proto && (par->target->proto != proto || inv_proto)) { diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 2a084308184..bde009ed8d3 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -109,7 +109,7 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)  	struct xt_ct_target_info *info = par->targinfo;  	struct nf_conntrack_tuple t;  	struct nf_conn *ct; -	int ret; +	int ret = -EOPNOTSUPP;  	if (info->flags & ~XT_CT_NOTRACK)  		return -EINVAL; @@ -247,7 +247,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)  	struct xt_ct_target_info_v1 *info = par->targinfo;  	struct nf_conntrack_tuple t;  	struct nf_conn *ct; -	int ret; +	int ret = -EOPNOTSUPP;  	if (info->flags & ~XT_CT_NOTRACK)  		return -EINVAL; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index a9327e2e48c..670cbc3518d 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -35,10 +35,11 @@  /* Must be called with rcu_read_lock. */  static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)  { -	if (unlikely(!vport)) { -		kfree_skb(skb); -		return; -	} +	if (unlikely(!vport)) +		goto error; + +	if (unlikely(skb_warn_if_lro(skb))) +		goto error;  	/* Make our own copy of the packet.  Otherwise we will mangle the  	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). @@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)  	skb_push(skb, ETH_HLEN);  	ovs_vport_receive(vport, skb); +	return; + +error: +	kfree_skb(skb);  }  /* Called with rcu_read_lock and bottom-halves disabled. 
*/ @@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)  		goto error;  	} -	if (unlikely(skb_warn_if_lro(skb))) -		goto error; -  	skb->dev = netdev_vport->dev;  	len = skb->len;  	dev_queue_xmit(skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e639645e8fe..c111bd0e083 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)  	packet_flush_mclist(sk); -	memset(&req_u, 0, sizeof(req_u)); - -	if (po->rx_ring.pg_vec) +	if (po->rx_ring.pg_vec) { +		memset(&req_u, 0, sizeof(req_u));  		packet_set_ring(sk, &req_u, 1, 0); +	} -	if (po->tx_ring.pg_vec) +	if (po->tx_ring.pg_vec) { +		memset(&req_u, 0, sizeof(req_u));  		packet_set_ring(sk, &req_u, 1, 1); +	}  	fanout_release(sk); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 51561eafcb7..79e8ed4ac7c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1135,9 +1135,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,  	memset(&opt, 0, sizeof(opt));  	opt.rate.rate = cl->rate.rate_bps >> 3; -	opt.buffer = cl->buffer; +	opt.buffer = PSCHED_NS2TICKS(cl->buffer);  	opt.ceil.rate = cl->ceil.rate_bps >> 3; -	opt.cbuffer = cl->cbuffer; +	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);  	opt.quantum = cl->quantum;  	opt.prio = cl->prio;  	opt.level = cl->level; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 298c0ddfb57..3d2acc7a9c8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		if (q->rate) {  			struct sk_buff_head *list = &sch->q; -			delay += packet_len_2_sched_time(skb->len, q); -  			if (!skb_queue_empty(list)) {  				/* -				 * Last packet in queue is reference point (now). -				 * First packet in queue is already in flight, -				 * calculate this time bonus and substract +				 * Last packet in queue is reference point (now), +				 * calculate this time bonus and subtract  				 * from delay.  				 */ -				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; +				delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; +				delay = max_t(psched_tdiff_t, 0, delay);  				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;  			} + +			delay += packet_len_2_sched_time(skb->len, q);  		}  		cb->time_to_send = now + delay; diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 7521d944c0f..cf4852814e0 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -3,8 +3,8 @@  #  menuconfig IP_SCTP -	tristate "The SCTP Protocol (EXPERIMENTAL)" -	depends on INET && EXPERIMENTAL +	tristate "The SCTP Protocol" +	depends on INET  	depends on IPV6 || IPV6=n  	select CRYPTO  	select CRYPTO_HMAC diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 159b9bc5d63..d8420ae614d 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)  		return;  	if (atomic_dec_and_test(&key->refcnt)) { -		kfree(key); +		kzfree(key);  		SCTP_DBG_OBJCNT_DEC(keys);  	}  } diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 17a001bac2c..1a9c5fb7731 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)  /* Final destructor for endpoint.  */  static void sctp_endpoint_destroy(struct sctp_endpoint *ep)  { +	int i; +  	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);  	/* Free up the HMAC transform. 
*/ @@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)  	sctp_inq_free(&ep->base.inqueue);  	sctp_bind_addr_free(&ep->base.bind_addr); +	for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) +		memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); +  	/* Remove and free the port */  	if (sctp_sk(ep->base.sk)->bind_hash)  		sctp_put_port(ep->base.sk); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f3f0f4dc31d..391a245d520 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,  	 */  	rcu_read_lock();  	list_for_each_entry_rcu(laddr, &bp->address_list, list) { -		if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) +		if (!laddr->valid)  			continue; -		if ((laddr->a.sa.sa_family == AF_INET6) && +		if ((laddr->state == SCTP_ADDR_SRC) && +		    (laddr->a.sa.sa_family == AF_INET6) &&  		    (scope <= sctp_scope(&laddr->a))) {  			bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);  			if (!baddr || (matchlen < bmatchlen)) { diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 379c81dee9d..9bcdbd02d77 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)  /* Free the outqueue structure and any related pending chunks.   */ -void sctp_outq_teardown(struct sctp_outq *q) +static void __sctp_outq_teardown(struct sctp_outq *q)  {  	struct sctp_transport *transport;  	struct list_head *lchunk, *temp; @@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)  		sctp_chunk_free(chunk);  	} -	q->error = 0; -  	/* Throw away any leftover control chunks. */  	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {  		list_del_init(&chunk->list); @@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)  	}  } +void sctp_outq_teardown(struct sctp_outq *q) +{ +	__sctp_outq_teardown(q); +	sctp_outq_init(q->asoc, q); +} +  /* Free the outqueue structure and any related pending chunks.  */  void sctp_outq_free(struct sctp_outq *q)  {  	/* Throw away leftover chunks. */ -	sctp_outq_teardown(q); +	__sctp_outq_teardown(q);  	/* If we were kmalloc()'d, free the memory.  */  	if (q->malloced) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 618ec7e216c..5131fcfedb0 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,  	/* Update the content of current association. 
*/  	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); -	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));  	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); +	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, +			SCTP_STATE(SCTP_STATE_ESTABLISHED)); +	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));  	return SCTP_DISPOSITION_CONSUME;  nomem_ev: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e65758cb03..cedd9bf67b8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,  	ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);  out: -	kfree(authkey); +	kzfree(authkey);  	return ret;  } diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 043889ac86c..bf3c6e8fc40 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)  void sctp_sysctl_net_unregister(struct net *net)  { +	struct ctl_table *table; + +	table = net->sctp.sysctl_header->ctl_table_arg;  	unregister_net_sysctl_table(net->sctp.sysctl_header); +	kfree(table);  }  static struct ctl_table_header * sctp_sysctl_header; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index bfa31714581..fb20f25ddec 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)  	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);  } +static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) +{ +	struct list_head *q = &queue->tasks[queue->priority]; +	struct rpc_task *task; + +	if (!list_empty(q)) { +		task = list_first_entry(q, struct rpc_task, u.tk_wait.list); +		if (task->tk_owner == queue->owner) +			list_move_tail(&task->u.tk_wait.list, q); +	} +} +  static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)  { -	queue->priority = priority; +	if (queue->priority != priority) { +		/* Fairness: rotate the list when changing priority */ +		rpc_rotate_queue_owner(queue); +		queue->priority = priority; +	}  }  static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0a148c9d2a5..0f679df7d07 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,  }  /* - * See net/ipv6/datagram.c : datagram_recv_ctl + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl   */  static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,  				     struct cmsghdr *cmh) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 01592d7d478..45f1618c8e2 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,  						  &iwe, IW_EV_UINT_LEN);  	} -	buf = kmalloc(30, GFP_ATOMIC); +	buf = kmalloc(31, GFP_ATOMIC);  	if (buf) {  		memset(&iwe, 0, sizeof(iwe));  		iwe.cmd = IWEVCUSTOM; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 41eabc46f11..07c585756d2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2656,7 +2656,7 @@ static void xfrm_policy_fini(struct net *net)  		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));  		htab = &net->xfrm.policy_bydst[dir]; -		sz = (htab->hmask + 1); +		sz = (htab->hmask + 1) * sizeof(struct hlist_head);  		WARN_ON(!hlist_empty(htab->table));  		xfrm_hash_free(htab->table, sz);  	} diff --git a/net/xfrm/xfrm_replay.c 
b/net/xfrm/xfrm_replay.c index 765f6fe951e..35754cc8a9e 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)  	u32 diff;  	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;  	u32 seq = ntohl(net_seq); -	u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; +	u32 pos;  	if (!replay_esn->replay_window)  		return; +	pos = (replay_esn->seq - 1) % replay_esn->replay_window; +  	if (seq > replay_esn->seq) {  		diff = seq - replay_esn->seq; diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index bbbd276659b..7203e66dcd6 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile @@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o  # Try to match the kernel target.  ifndef CONFIG_64BIT +ifndef CROSS_COMPILE  # s390 has -m31 flag to build 31 bit binaries  ifndef CONFIG_S390 @@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG)  HOSTLOADLIBES_bpf-fancy += $(MFLAG)  HOSTLOADLIBES_dropper += $(MFLAG)  endif +endif  # Tell kbuild to always build the programs  always := $(hostprogs-y) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 4d2c7dfdaab..2bb08a962ce 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -230,12 +230,12 @@ our $Inline	= qr{inline|__always_inline|noinline};  our $Member	= qr{->$Ident|\.$Ident|\[[^]]*\]};  our $Lval	= qr{$Ident(?:$Member)*}; -our $Float_hex	= qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; -our $Float_dec	= qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; -our $Float_int	= qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; +our $Float_hex	= qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; +our $Float_dec	= qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; +our $Float_int	= qr{(?i)[0-9]+e-?[0-9]+[fl]?};  our $Float	= qr{$Float_hex|$Float_dec|$Float_int}; -our $Constant	= qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; -our $Assignment	= qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; +our $Constant	= qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; +our $Assignment	= qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};  our $Compare    = qr{<=|>=|==|!=|<|>};  our $Operators	= qr{  			<=|>=|==|!=| diff --git a/security/capability.c b/security/capability.c index 0fe5a026aef..57977508896 100644 --- a/security/capability.c +++ b/security/capability.c @@ -709,16 +709,31 @@ static void cap_req_classify_flow(const struct request_sock *req,  {  } +static int cap_tun_dev_alloc_security(void **security) +{ +	return 0; +} + +static void cap_tun_dev_free_security(void *security) +{ +} +  static int cap_tun_dev_create(void)  {  	return 0;  } -static void cap_tun_dev_post_create(struct sock *sk) +static int cap_tun_dev_attach_queue(void *security) +{ +	return 0; +} + +static int cap_tun_dev_attach(struct sock *sk, void *security)  { +	return 0;  } -static int cap_tun_dev_attach(struct sock *sk) +static int cap_tun_dev_open(void *security)  {  	return 0;  } @@ -1050,8 +1065,11 @@ void __init security_fixup_ops(struct security_operations *ops)  	set_to_cap_if_null(ops, secmark_refcount_inc);  	set_to_cap_if_null(ops, secmark_refcount_dec);  	set_to_cap_if_null(ops, req_classify_flow); +	set_to_cap_if_null(ops, tun_dev_alloc_security); +	set_to_cap_if_null(ops, tun_dev_free_security);  	set_to_cap_if_null(ops, tun_dev_create); -	set_to_cap_if_null(ops, tun_dev_post_create); +	set_to_cap_if_null(ops, tun_dev_open); +	set_to_cap_if_null(ops, tun_dev_attach_queue);  	set_to_cap_if_null(ops, tun_dev_attach);  #endif	/* CONFIG_SECURITY_NETWORK */  
#ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 19ecc8de9e6..d794abcc4b3 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup)  	struct dev_cgroup *dev_cgroup;  	dev_cgroup = cgroup_to_devcgroup(cgroup); +	mutex_lock(&devcgroup_mutex);  	dev_exception_clean(dev_cgroup); +	mutex_unlock(&devcgroup_mutex);  	kfree(dev_cgroup);  } diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index dfb26918699..7dd538ef5b8 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,  		rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,  					   &xattr_data,  					   sizeof(xattr_data), 0); -	} -	else if (rc == -ENODATA) +	} else if (rc == -ENODATA && inode->i_op->removexattr) {  		rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); +	}  	return rc;  } diff --git a/security/security.c b/security/security.c index daa97f4ac9d..7b88c6aeaed 100644 --- a/security/security.c +++ b/security/security.c @@ -1254,24 +1254,42 @@ void security_secmark_refcount_dec(void)  }  EXPORT_SYMBOL(security_secmark_refcount_dec); +int security_tun_dev_alloc_security(void **security) +{ +	return security_ops->tun_dev_alloc_security(security); +} +EXPORT_SYMBOL(security_tun_dev_alloc_security); + +void security_tun_dev_free_security(void *security) +{ +	security_ops->tun_dev_free_security(security); +} +EXPORT_SYMBOL(security_tun_dev_free_security); +  int security_tun_dev_create(void)  {  	return security_ops->tun_dev_create();  }  EXPORT_SYMBOL(security_tun_dev_create); -void security_tun_dev_post_create(struct sock *sk) +int security_tun_dev_attach_queue(void *security)  { -	return security_ops->tun_dev_post_create(sk); +	return security_ops->tun_dev_attach_queue(security);  } -EXPORT_SYMBOL(security_tun_dev_post_create); +EXPORT_SYMBOL(security_tun_dev_attach_queue); -int security_tun_dev_attach(struct sock *sk) +int security_tun_dev_attach(struct sock *sk, void *security)  { -	return security_ops->tun_dev_attach(sk); +	return security_ops->tun_dev_attach(sk, security);  }  EXPORT_SYMBOL(security_tun_dev_attach); +int security_tun_dev_open(void *security) +{ +	return security_ops->tun_dev_open(security); +} +EXPORT_SYMBOL(security_tun_dev_open); +  #endif	/* CONFIG_SECURITY_NETWORK */  #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 61a53367d02..ef26e9611ff 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4399,6 +4399,24 @@ static void selinux_req_classify_flow(const struct request_sock *req,  	fl->flowi_secid = req->secid;  } +static int selinux_tun_dev_alloc_security(void **security) +{ +	struct tun_security_struct *tunsec; + +	tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL); +	if (!tunsec) +		return -ENOMEM; +	tunsec->sid = current_sid(); + +	*security = tunsec; +	return 0; +} + +static void selinux_tun_dev_free_security(void *security) +{ +	kfree(security); +} +  static int selinux_tun_dev_create(void)  {  	u32 sid = current_sid(); @@ -4414,8 +4432,17 @@ static int selinux_tun_dev_create(void)  			    NULL);  } -static void selinux_tun_dev_post_create(struct sock *sk) +static int selinux_tun_dev_attach_queue(void *security)  { +	struct tun_security_struct *tunsec = security; + +	return avc_has_perm(current_sid(), tunsec->sid, 
SECCLASS_TUN_SOCKET, +			    TUN_SOCKET__ATTACH_QUEUE, NULL); +} + +static int selinux_tun_dev_attach(struct sock *sk, void *security) +{ +	struct tun_security_struct *tunsec = security;  	struct sk_security_struct *sksec = sk->sk_security;  	/* we don't currently perform any NetLabel based labeling here and it @@ -4425,20 +4452,19 @@ static void selinux_tun_dev_post_create(struct sock *sk)  	 * cause confusion to the TUN user that had no idea network labeling  	 * protocols were being used */ -	/* see the comments in selinux_tun_dev_create() about why we don't use -	 * the sockcreate SID here */ - -	sksec->sid = current_sid(); +	sksec->sid = tunsec->sid;  	sksec->sclass = SECCLASS_TUN_SOCKET; + +	return 0;  } -static int selinux_tun_dev_attach(struct sock *sk) +static int selinux_tun_dev_open(void *security)  { -	struct sk_security_struct *sksec = sk->sk_security; +	struct tun_security_struct *tunsec = security;  	u32 sid = current_sid();  	int err; -	err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET, +	err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,  			   TUN_SOCKET__RELABELFROM, NULL);  	if (err)  		return err; @@ -4446,8 +4472,7 @@ static int selinux_tun_dev_attach(struct sock *sk)  			   TUN_SOCKET__RELABELTO, NULL);  	if (err)  		return err; - -	sksec->sid = sid; +	tunsec->sid = sid;  	return 0;  } @@ -5642,9 +5667,12 @@ static struct security_operations selinux_ops = {  	.secmark_refcount_inc =		selinux_secmark_refcount_inc,  	.secmark_refcount_dec =		selinux_secmark_refcount_dec,  	.req_classify_flow =		selinux_req_classify_flow, +	.tun_dev_alloc_security =	selinux_tun_dev_alloc_security, +	.tun_dev_free_security =	selinux_tun_dev_free_security,  	.tun_dev_create =		selinux_tun_dev_create, -	.tun_dev_post_create = 		selinux_tun_dev_post_create, +	.tun_dev_attach_queue =		selinux_tun_dev_attach_queue,  	.tun_dev_attach =		selinux_tun_dev_attach, +	.tun_dev_open =			selinux_tun_dev_open,  #ifdef CONFIG_SECURITY_NETWORK_XFRM  	.xfrm_policy_alloc_security =	selinux_xfrm_policy_alloc, diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index df2de54a958..14d04e63b1f 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -150,6 +150,6 @@ struct security_class_mapping secclass_map[] = {  	    NULL } },  	{ "kernel_service", { "use_as_override", "create_files_as", NULL } },  	{ "tun_socket", -	  { COMMON_SOCK_PERMS, NULL } }, +	  { COMMON_SOCK_PERMS, "attach_queue", NULL } },  	{ NULL }    }; diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 26c7eee1c30..aa47bcabb5f 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -110,6 +110,10 @@ struct sk_security_struct {  	u16 sclass;			/* sock security class */  }; +struct tun_security_struct { +	u32 sid;			/* SID for the tun device sockets */ +}; +  struct key_security_struct {  	u32 sid;	/* SID of key */  }; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index b8fb0a5adb9..822df971972 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)  	hda_set_power_state(codec, AC_PWRST_D0);  	restore_shutup_pins(codec);  	hda_exec_init_verbs(codec); +	snd_hda_jack_set_dirty_all(codec);  	if (codec->patch_ops.resume)  		codec->patch_ops.resume(codec);  	else { @@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)  	if (codec->jackpoll_interval)  		
hda_jackpoll_work(&codec->jackpoll_work.work); -	else { -		snd_hda_jack_set_dirty_all(codec); +	else  		snd_hda_jack_report_sync(codec); -	}  	codec->in_pm = 0;  	snd_hda_power_down(codec); /* flag down before returning */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0b6aebacc56..c78286f6e5d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -656,29 +656,43 @@ static char *driver_short_names[] = {  #define get_azx_dev(substream) (substream->runtime->private_data)  #ifdef CONFIG_X86 -static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on) +static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)  { +	int pages; +  	if (azx_snoop(chip))  		return; -	if (addr && size) { -		int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; +	if (!dmab || !dmab->area || !dmab->bytes) +		return; + +#ifdef CONFIG_SND_DMA_SGBUF +	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) { +		struct snd_sg_buf *sgbuf = dmab->private_data;  		if (on) -			set_memory_wc((unsigned long)addr, pages); +			set_pages_array_wc(sgbuf->page_table, sgbuf->pages);  		else -			set_memory_wb((unsigned long)addr, pages); +			set_pages_array_wb(sgbuf->page_table, sgbuf->pages); +		return;  	} +#endif + +	pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; +	if (on) +		set_memory_wc((unsigned long)dmab->area, pages); +	else +		set_memory_wb((unsigned long)dmab->area, pages);  }  static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,  				 bool on)  { -	__mark_pages_wc(chip, buf->area, buf->bytes, on); +	__mark_pages_wc(chip, buf, on);  }  static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, -				   struct snd_pcm_runtime *runtime, bool on) +				   struct snd_pcm_substream *substream, bool on)  {  	if (azx_dev->wc_marked != on) { -		__mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on); +		__mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);  		azx_dev->wc_marked = on;  	}  } @@ -689,7 +703,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,  {  }  static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, -				   struct snd_pcm_runtime *runtime, bool on) +				   struct snd_pcm_substream *substream, bool on)  {  }  #endif @@ -1968,11 +1982,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,  {  	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);  	struct azx *chip = apcm->chip; -	struct snd_pcm_runtime *runtime = substream->runtime;  	struct azx_dev *azx_dev = get_azx_dev(substream);  	int ret; -	mark_runtime_wc(chip, azx_dev, runtime, false); +	mark_runtime_wc(chip, azx_dev, substream, false);  	azx_dev->bufsize = 0;  	azx_dev->period_bytes = 0;  	azx_dev->format_val = 0; @@ -1980,7 +1993,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,  					params_buffer_bytes(hw_params));  	if (ret < 0)  		return ret; -	mark_runtime_wc(chip, azx_dev, runtime, true); +	mark_runtime_wc(chip, azx_dev, substream, true);  	return ret;  } @@ -1989,7 +2002,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)  	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);  	struct azx_dev *azx_dev = get_azx_dev(substream);  	struct azx *chip = apcm->chip; -	struct snd_pcm_runtime *runtime = substream->runtime;  	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];  	/* reset BDL address */ @@ -2002,7 +2014,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)  	
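/*
 * Editor's note (illustrative, not from this patch): with the hda_intel.c
 * rework above, __mark_pages_wc() takes the snd_dma_buffer itself so that
 * scatter-gather buffers can be flipped with set_pages_array_wc()/_wb()
 * instead of assuming one contiguous mapping.  Callers keep the same
 * bracketing pattern around (re)allocation, roughly:
 *
 *	mark_runtime_wc(chip, azx_dev, substream, false);
 *	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
 *	if (ret >= 0)
 *		mark_runtime_wc(chip, azx_dev, substream, true);
 */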
snd_hda_codec_cleanup(apcm->codec, hinfo, substream); -	mark_runtime_wc(chip, azx_dev, runtime, false); +	mark_runtime_wc(chip, azx_dev, substream, false);  	return snd_pcm_lib_free_pages(substream);  } @@ -3613,13 +3625,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {  	/* 5 Series/3400 */  	{ PCI_DEVICE(0x8086, 0x3b56),  	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, -	/* SCH */ +	/* Poulsbo */  	{ PCI_DEVICE(0x8086, 0x811b), -	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | -	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */ +	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, +	/* Oaktrail */  	{ PCI_DEVICE(0x8086, 0x080a), -	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | -	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */ +	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },  	/* ICH */  	{ PCI_DEVICE(0x8086, 0x2668),  	  .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index dd798c3196f..009b77a693c 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -4636,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {  	  .patch = patch_conexant_auto },  	{ .id = 0x14f15111, .name = "CX20753/4",  	  .patch = patch_conexant_auto }, +	{ .id = 0x14f15113, .name = "CX20755", +	  .patch = patch_conexant_auto }, +	{ .id = 0x14f15114, .name = "CX20756", +	  .patch = patch_conexant_auto }, +	{ .id = 0x14f15115, .name = "CX20757", +	  .patch = patch_conexant_auto },  	{} /* terminator */  }; @@ -4659,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9");  MODULE_ALIAS("snd-hda-codec-id:14f1510f");  MODULE_ALIAS("snd-hda-codec-id:14f15110");  MODULE_ALIAS("snd-hda-codec-id:14f15111"); +MODULE_ALIAS("snd-hda-codec-id:14f15113"); +MODULE_ALIAS("snd-hda-codec-id:14f15114"); +MODULE_ALIAS("snd-hda-codec-id:14f15115");  MODULE_LICENSE("GPL");  MODULE_DESCRIPTION("Conexant HD-audio codec"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f5196277b6e..5faaad219a7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4694,6 +4694,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),  	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),  	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), +	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST),  	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),  	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),  	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), @@ -5708,6 +5709,7 @@ static const struct alc_model_fixup alc268_fixup_models[] = {  };  static const struct snd_pci_quirk alc268_fixup_tbl[] = { +	SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),  	/* below is codec SSID since multiple Toshiba laptops have the  	 * same PCI SSID 1179:ff00  	 */ @@ -6251,6 +6253,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),  	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),  	SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), +	SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED),  	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),  	
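/*
 * Editor's note: each SND_PCI_QUIRK(vendor, device, name, fixup) entry above
 * and below keys a fixup off the PCI subsystem IDs, so a new machine is
 * normally covered by a single line that reuses an existing fixup, e.g.
 * (hypothetical IDs, for illustration only):
 *
 *	SND_PCI_QUIRK(0x1043, 0xffff, "Example laptop", ALC269VB_FIXUP_DMIC),
 */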
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC),  	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), @@ -6265,6 +6268,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),  	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),  	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), +	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),  	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),  	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),  	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 1d8bb591759..ef62c435848 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -685,7 +685,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,  	}  	sr_val = i; -	lrclk = snd_soc_params_to_bclk(params) / params_rate(params); +	lrclk = rates[bclk] / params_rate(params);  	arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",  			rates[bclk], rates[bclk] / lrclk); @@ -1082,6 +1082,9 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,  			id, ret);  	} +	regmap_update_bits(arizona->regmap, fll->base + 1, +			   ARIZONA_FLL1_FREERUN, 0); +  	return 0;  }  EXPORT_SYMBOL_GPL(arizona_init_fll); diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c index e6cefe1ac67..d8c65f57465 100644 --- a/sound/soc/codecs/wm2200.c +++ b/sound/soc/codecs/wm2200.c @@ -1019,8 +1019,6 @@ static const char *wm2200_mixer_texts[] = {  	"EQR",  	"LHPF1",  	"LHPF2", -	"LHPF3", -	"LHPF4",  	"DSP1.1",  	"DSP1.2",  	"DSP1.3", @@ -1053,7 +1051,6 @@ static int wm2200_mixer_values[] = {  	0x25,  	0x50,   /* EQ */  	0x51, -	0x52,  	0x60,   /* LHPF1 */  	0x61,   /* LHPF2 */  	0x68,   /* DSP1 */ diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index 7a9048dad1c..1440b3f9b7b 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -896,8 +896,7 @@ static const unsigned int wm5102_aec_loopback_values[] = {  static const struct soc_enum wm5102_aec_loopback =  	SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, -			      ARIZONA_AEC_LOOPBACK_SRC_SHIFT, -			      ARIZONA_AEC_LOOPBACK_SRC_MASK, +			      ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,  			      ARRAY_SIZE(wm5102_aec_loopback_texts),  			      wm5102_aec_loopback_texts,  			      wm5102_aec_loopback_values); diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index ae80c8c2853..7a090968c4f 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -344,8 +344,7 @@ static const unsigned int wm5110_aec_loopback_values[] = {  static const struct soc_enum wm5110_aec_loopback =  	SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, -			      ARIZONA_AEC_LOOPBACK_SRC_SHIFT, -			      ARIZONA_AEC_LOOPBACK_SRC_MASK, +			      ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,  			      ARRAY_SIZE(wm5110_aec_loopback_texts),  			      wm5110_aec_loopback_texts,  			      wm5110_aec_loopback_values); diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 7b198c38f3e..b6b65483758 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -324,7 +324,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)  		if (reg) {  			buf = kmemdup(region->data, le32_to_cpu(region->len), -				      GFP_KERNEL); +				      GFP_KERNEL | GFP_DMA);  			if (!buf) {  				
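/*
 * Editor's note: the kmemdup() above now asks for GFP_KERNEL | GFP_DMA
 * because the copied firmware block is handed to the control bus for DMA
 * down to the DSP; that the bus is SPI on the affected systems is an
 * inference from the driver context, not stated in this hunk.
 */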
adsp_err(dsp, "Out of memory\n");  				return -ENOMEM; @@ -396,7 +396,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)  	hdr = (void*)&firmware->data[0];  	if (memcmp(hdr->magic, "WMDR", 4) != 0) {  		adsp_err(dsp, "%s: invalid magic\n", file); -		return -EINVAL; +		goto out_fw;  	}  	adsp_dbg(dsp, "%s: v%d.%d.%d\n", file, @@ -439,7 +439,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)  		if (reg) {  			buf = kmemdup(blk->data, le32_to_cpu(blk->len), -				      GFP_KERNEL); +				      GFP_KERNEL | GFP_DMA);  			if (!buf) {  				adsp_err(dsp, "Out of memory\n");  				return -ENOMEM; diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c index bf363d8d044..500f8ce55d7 100644 --- a/sound/soc/fsl/imx-pcm-dma.c +++ b/sound/soc/fsl/imx-pcm-dma.c @@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {  	.pcm_free	= imx_pcm_free,  }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_dma_init(struct platform_device *pdev)  {  	return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);  } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ -	snd_soc_unregister_platform(&pdev->dev); -	return 0; -} - -static struct platform_driver imx_pcm_driver = { -	.driver = { -			.name = "imx-pcm-audio", -			.owner = THIS_MODULE, -	}, -	.probe = imx_soc_platform_probe, -	.remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-pcm-audio"); diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c index 5ec362ae4d0..920f945cb2f 100644 --- a/sound/soc/fsl/imx-pcm-fiq.c +++ b/sound/soc/fsl/imx-pcm-fiq.c @@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {  	.pcm_free	= imx_pcm_fiq_free,  }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_fiq_init(struct platform_device *pdev)  {  	struct imx_ssi *ssi = platform_get_drvdata(pdev);  	int ret; @@ -314,23 +314,3 @@ failed_register:  	return ret;  } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ -	snd_soc_unregister_platform(&pdev->dev); -	return 0; -} - -static struct platform_driver imx_pcm_driver = { -	.driver = { -			.name = "imx-fiq-pcm-audio", -			.owner = THIS_MODULE, -	}, - -	.probe = imx_soc_platform_probe, -	.remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); - -MODULE_LICENSE("GPL"); diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c index d5cd9eff3b4..0d0625bfcb6 100644 --- a/sound/soc/fsl/imx-pcm.c +++ b/sound/soc/fsl/imx-pcm.c @@ -104,6 +104,38 @@ void imx_pcm_free(struct snd_pcm *pcm)  }  EXPORT_SYMBOL_GPL(imx_pcm_free); +static int imx_pcm_probe(struct platform_device *pdev) +{ +	if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) +		return imx_pcm_fiq_init(pdev); + +	return imx_pcm_dma_init(pdev); +} + +static int imx_pcm_remove(struct platform_device *pdev) +{ +	snd_soc_unregister_platform(&pdev->dev); +	return 0; +} + +static struct platform_device_id imx_pcm_devtype[] = { +	{ .name = "imx-pcm-audio", }, +	{ .name = "imx-fiq-pcm-audio", }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); + +static struct platform_driver imx_pcm_driver = { +	.driver = { +			.name = "imx-pcm", +			.owner = THIS_MODULE, +	}, +	.id_table = imx_pcm_devtype, +	.probe = imx_pcm_probe, +	.remove = imx_pcm_remove, +}; +module_platform_driver(imx_pcm_driver); +  MODULE_DESCRIPTION("Freescale i.MX PCM driver");  
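/*
 * Editor's sketch (assumption, not part of this patch): with the DMA and FIQ
 * probes folded into the single "imx-pcm" driver above, the parent SSI code
 * is expected to create a child device whose name selects the backend via
 * the id_table, along the lines of:
 *
 *	struct platform_device *pcm_pdev =
 *		platform_device_register_data(&ssi_pdev->dev,
 *					      use_fiq ? "imx-fiq-pcm-audio"
 *						      : "imx-pcm-audio",
 *					      -1, NULL, 0);
 *
 *	ssi_pdev and use_fiq are illustrative names.
 */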
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");  MODULE_LICENSE("GPL"); diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h index 83c0ed7d55c..5ae13a13a35 100644 --- a/sound/soc/fsl/imx-pcm.h +++ b/sound/soc/fsl/imx-pcm.h @@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,  int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);  void imx_pcm_free(struct snd_pcm *pcm); +#ifdef CONFIG_SND_SOC_IMX_PCM_DMA +int imx_pcm_dma_init(struct platform_device *pdev); +#else +static inline int imx_pcm_dma_init(struct platform_device *pdev) +{ +	return -ENODEV; +} +#endif + +#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ +int imx_pcm_fiq_init(struct platform_device *pdev); +#else +static inline int imx_pcm_fiq_init(struct platform_device *pdev) +{ +	return -ENODEV; +} +#endif +  #endif /* _IMX_PCM_H */ diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1e36bc81e5a..258acadb9e7 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1023,7 +1023,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,  	if (SND_SOC_DAPM_EVENT_ON(event)) {  		if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { -			ret = regulator_allow_bypass(w->regulator, true); +			ret = regulator_allow_bypass(w->regulator, false);  			if (ret != 0)  				dev_warn(w->dapm->dev,  					 "ASoC: Failed to bypass %s: %d\n", @@ -1033,7 +1033,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,  		return regulator_enable(w->regulator);  	} else {  		if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { -			ret = regulator_allow_bypass(w->regulator, false); +			ret = regulator_allow_bypass(w->regulator, true);  			if (ret != 0)  				dev_warn(w->dapm->dev,  					 "ASoC: Failed to unbypass %s: %d\n", @@ -3039,6 +3039,14 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,  				w->name, ret);  			return NULL;  		} + +		if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { +			ret = regulator_allow_bypass(w->regulator, true); +			if (ret != 0) +				dev_warn(w->dapm->dev, +					 "ASoC: Failed to unbypass %s: %d\n", +					 w->name, ret); +		}  		break;  	case snd_soc_dapm_clock_supply:  #ifdef CONFIG_CLKDEV_LOOKUP diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index ed4d89c8b52..e90daf8cdaa 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1331,16 +1331,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void  		}  		channels = (hdr->bLength - 7) / csize - 1;  		bmaControls = hdr->bmaControls; +		if (hdr->bLength < 7 + csize) { +			snd_printk(KERN_ERR "usbaudio: unit %u: " +				   "invalid UAC_FEATURE_UNIT descriptor\n", +				   unitid); +			return -EINVAL; +		}  	} else {  		struct uac2_feature_unit_descriptor *ftr = _ftr;  		csize = 4;  		channels = (hdr->bLength - 6) / 4 - 1;  		bmaControls = ftr->bmaControls; -	} - -	if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) { -		snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid); -		return -EINVAL; +		if (hdr->bLength < 6 + csize) { +			snd_printk(KERN_ERR "usbaudio: unit %u: " +				   "invalid UAC_FEATURE_UNIT descriptor\n", +				   unitid); +			return -EINVAL; +		}  	}  	/* parse the source unit */ diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 80db3f4bcf7..39d41068484 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -11,11 +11,21 @@ lib/rbtree.c  include/linux/swab.h  arch/*/include/asm/unistd*.h  arch/*/include/asm/perf_regs.h +arch/*/include/uapi/asm/unistd*.h +arch/*/include/uapi/asm/perf_regs.h  arch/*/lib/memcpy*.S  
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/magic.h
 include/linux/hw_breakpoint.h
+include/linux/rbtree_augmented.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/const.h
+include/uapi/linux/swab.h
+include/uapi/linux/hw_breakpoint.h
 arch/x86/include/asm/svm.h
 arch/x86/include/asm/vmx.h
 arch/x86/include/asm/kvm_host.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/x86/include/uapi/asm/kvm.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 891bc77bdb2..8ab05e543ef 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -58,7 +58,7 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 				  -e s/arm.*/arm/ -e s/sa110/arm/ \
 				  -e s/s390x/s390/ -e s/parisc64/parisc/ \
 				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-				  -e s/sh[234].*/sh/ )
+				  -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
 NO_PERF_REGS := 1
 CC = $(CROSS_COMPILE)gcc
diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore
new file mode 100644
index 00000000000..44f095fa260
--- /dev/null
+++ b/tools/vm/.gitignore
@@ -0,0 +1,2 @@
+slabinfo
+page-types
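
Editor's note on the sound/usb/mixer.c hunk above: parse_audio_feature_unit() now applies a per-version length check before using the control bitmap, since bmaControls starts at byte 7 of a UAC1 feature unit (byte 6 for UAC2) and needs at least csize bytes for the master channel. A simplified, user-space style sketch of the same arithmetic (struct layout trimmed and field names assumed for illustration):

	#include <stdint.h>

	/* trimmed view of a UAC1 feature unit descriptor */
	struct uac1_feature_unit {
		uint8_t bLength;
		uint8_t bDescriptorType;
		uint8_t bDescriptorSubtype;
		uint8_t bUnitID;
		uint8_t bSourceID;
		uint8_t bControlSize;		/* csize */
		uint8_t bmaControls[];		/* (channels + 1) * csize bytes */
	};

	/* returns the channel count, or -1 for a descriptor too short to trust */
	static int uac1_feature_channels(const struct uac1_feature_unit *ftr)
	{
		unsigned int csize = ftr->bControlSize;

		if (!csize || ftr->bLength < 7 + csize)
			return -1;
		return (ftr->bLength - 7) / csize - 1;
	}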